import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, Flatten
from keras.callbacks import EarlyStopping
import tensorflow as tf
from pandas_datareader import data
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import pickle
#Defining MAPE function
def MAPE(Y_actual,Y_Predicted):
mape = np.mean(np.abs((Y_actual - Y_Predicted)/Y_actual))*100
return mape
Using TensorFlow backend.
data = data.DataReader("^GSPC", start='2012-03-31', end='2020-03-31', data_source='yahoo') data
# create table for results
columns = ['RMSE','MSE','MAE','MAPE','R2', 'error_abs_sum','rate of change acc']
df_result = pd.DataFrame(index=columns)
data = data.DataReader("SPY",
start='2000-01-01',
end='2020-10-31',
data_source='yahoo')
data['Date'] = data.index
data = data.interpolate(method='polynomial', order=2)
data
| High | Low | Open | Close | Volume | Adj Close | Date | |
|---|---|---|---|---|---|---|---|
| Date | |||||||
| 1999-12-31 | 147.500000 | 146.250000 | 146.843750 | 146.875000 | 3172700.0 | 99.538696 | 1999-12-31 |
| 2000-01-03 | 148.250000 | 143.875000 | 148.250000 | 145.437500 | 8164300.0 | 98.564461 | 2000-01-03 |
| 2000-01-04 | 144.062500 | 139.640625 | 143.531250 | 139.750000 | 8089800.0 | 94.709984 | 2000-01-04 |
| 2000-01-05 | 141.531250 | 137.250000 | 139.937500 | 140.000000 | 12177900.0 | 94.879379 | 2000-01-05 |
| 2000-01-06 | 141.500000 | 137.750000 | 139.625000 | 137.750000 | 6227200.0 | 93.354584 | 2000-01-06 |
| ... | ... | ... | ... | ... | ... | ... | ... |
| 2020-10-26 | 342.980011 | 335.619995 | 342.130005 | 339.390015 | 91473000.0 | 339.390015 | 2020-10-26 |
| 2020-10-27 | 340.119995 | 337.989990 | 339.760010 | 338.220001 | 65994100.0 | 338.220001 | 2020-10-27 |
| 2020-10-28 | 338.250000 | 326.130005 | 332.100006 | 326.660004 | 127094300.0 | 326.660004 | 2020-10-28 |
| 2020-10-29 | 333.399994 | 325.089996 | 326.910004 | 329.980011 | 90597700.0 | 329.980011 | 2020-10-29 |
| 2020-10-30 | 329.690002 | 322.600006 | 328.279999 | 326.540009 | 120287300.0 | 326.540009 | 2020-10-30 |
5243 rows × 7 columns
isempty = data.empty
isempty
False
l = len(data)
split = round(data.shape[0]/3)
data_training = data[:-split]
data_test = data[-split:]
print(data_training.shape)
print(data_test.shape)
plt.figure()
data_training['Adj Close'].plot()
data_test['Adj Close'].plot()
(3495, 7) (1748, 7)
<AxesSubplot:xlabel='Date'>
scaler = MinMaxScaler(feature_range = (0, 1))
scaler_single = MinMaxScaler(feature_range = (0, 1))
scaled_data = np.concatenate([scaler_single.fit_transform(data_training[['Adj Close']]),
scaler.fit_transform(data_training[['High','Low','Open','Close','Volume']]),
], axis = 1)
print(scaled_data.shape)
scaled_data_test = np.concatenate([scaler_single.fit_transform(data_test[['Adj Close']]),
scaler.fit_transform(data_test[['High','Low','Open','Close','Volume']]),
], axis = 1)
print(scaled_data_test.shape)
(3495, 6) (1748, 6)
X_train = []
y_train = []
lookback = 64
lookforward = 1 #
#lookforward = 0
ilast = 6
for i in range(lookback, scaled_data.shape[0]-lookforward-4):
X_train.append(scaled_data[i-lookback:i][:,:ilast])
y_train.append(scaled_data[i+lookforward, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
print(X_train.shape)
print(y_train.shape)
#========================================================================================================================
X_test = []
y_test = []
X_test_p= []
for i in range(lookback, scaled_data_test.shape[0]-lookforward-4):
#print(i-lookback)
#print(i)
#print('-----------')
X_test.append(scaled_data_test[i-lookback:i][:,:ilast])
y_test.append(
scaled_data_test[i+lookforward, 0],
)
X_test_p.append(data_test[i-lookback:i])
X_test, y_test = np.array(X_test), np.array(y_test)
print(X_test.shape)
print(y_test.shape)
print(X_test[0])
print(y_test)
(3426, 64, 6) (3426,) (1679, 64, 6) (1679,) [[0.02051685 0.0245101 0.02865565 0.02313768 0.03127557 0.14902429] [0.02436839 0.02876797 0.03371911 0.02871499 0.03617937 0.12531667] [0.02359817 0.03062395 0.03705758 0.03506547 0.03519864 0.12159985] [0.02381215 0.0308969 0.0372802 0.03280138 0.03547101 0.1370175 ] [0.02569499 0.0310061 0.03861556 0.03362967 0.03786845 0.07912122] [0.02518139 0.03379007 0.03945024 0.03611469 0.03721463 0.07310632] [0.0231702 0.03204321 0.03638991 0.03484454 0.03465374 0.16316261] [0.01983235 0.02636608 0.03038055 0.02849414 0.03040376 0.19773733] [0.01974663 0.02685735 0.02581794 0.02385558 0.03029476 0.21102346] [0.01636577 0.02281788 0.0281549 0.02556742 0.02599032 0.17796422] [0.0249247 0.03029643 0.03583345 0.03252526 0.03688772 0.22066462] [0.02689317 0.03335335 0.04145336 0.03694297 0.03939408 0.10237501] [0.02411147 0.03166113 0.03855994 0.0342371 0.03585246 0.12465956] [0.01542473 0.02887717 0.02665254 0.03335363 0.0247916 0.22654378] [0.01289972 0.0180141 0.02253499 0.02131537 0.02157689 0.19568794] [0.01281423 0.01692235 0.02259068 0.02054228 0.02146789 0.17975897] [0.01756403 0.02319996 0.02887819 0.02302721 0.02751595 0.15591172] [0.01512488 0.02101646 0.02526148 0.02540177 0.02441015 0.14295621] [0.02817707 0.03368087 0.02008681 0.02286156 0.0410287 0.44075427] [0.02727828 0.03351711 0.03894948 0.03534151 0.03988452 0.23874196] [0.03179604 0.03510021 0.03817049 0.03263573 0.04026589 0.36309308] [0.03596952 0.03864841 0.04651681 0.04235462 0.04555114 0.13415082] [0.0376906 0.04066814 0.0490763 0.04285159 0.04773058 0.0515403 ] [0.04169198 0.04585408 0.05347208 0.0472693 0.05279792 0.08849543] [0.04164883 0.04705494 0.05536389 0.05146616 0.05274346 0.08531046] [0.04151996 0.0461816 0.05491874 0.05019602 0.05258001 0.07513127] [0.04526308 0.04983899 0.05686616 0.05130051 0.05732035 0.13522254] [0.03764752 0.04645455 0.04879807 0.05080345 0.04767613 0.20404958] [0.0375185 0.0438889 0.04963276 0.04666186 0.04751267 0.12551094] [0.03523804 0.04367051 0.04657242 0.04809767 0.04462487 0.18021115] [0.04005707 0.04492601 0.05141326 0.04588877 0.05072738 0.13527244] [0.04022894 0.04514441 0.05107942 0.04787674 0.05094538 0.15670704] [0.04074532 0.04678207 0.05057866 0.05152135 0.0515992 0.14459367] [0.04289676 0.04727334 0.0517471 0.0506378 0.05432355 0.16788648] [0.03235549 0.04705494 0.04245488 0.04909161 0.04097425 0.26617832] [0.04087449 0.04481689 0.04584905 0.04147106 0.05176265 0.17402582] [0.04513398 0.05120369 0.05564211 0.05146616 0.05715689 0.16069801] [0.04410153 0.04967523 0.05630979 0.0524601 0.05584918 0.10682412] [0.04074532 0.04852885 0.05347208 0.05146616 0.0515992 0.17984255] [0.04306863 0.05027571 0.05196971 0.05477938 0.05454146 0.14035895] [0.04358516 0.04918396 0.05675494 0.05361979 0.05519536 0.0841952 ] [0.03708806 0.04279708 0.04512576 0.04743496 0.04696777 0.23045754] [0.02030804 0.03329879 0.02848874 0.03766087 0.02571787 0.38689331] [0.01652198 0.02161694 0.0189739 0.02363465 0.02092299 0.32973629] [0.0210826 0.02041599 0.02453811 0.01855431 0.02669868 0.18521142] [0.0136823 0.01632188 0.01763854 0.01546194 0.01732691 0.40315742] [0.021771 0.02319996 0.02531709 0.02236459 0.02757041 0.20261459] [0.01725332 0.02036135 0.01786107 0.0123143 0.02184926 0.358146 ] [0. 0.01533925 0.00066768 0.01761556 0. 0.48168271] [0.00524924 0.00152846 0.00222565 0.00093875 0.00664742 0.29722801] [0.0043025 0. 0. 0. 0.0054487 0.29562238] [0.0142416 0.01048091 0.00840193 0.00441771 0.01803518 0.23123931] [0.02370696 0.02352747 0.02236807 0.01949306 0.03002231 0.30908654] [0.02512699 0.0246193 0.03060316 0.0271688 0.03182039 0.14774661] [0.0336032 0.03755666 0.03522138 0.02970901 0.04255433 0.20030646] [0.03399041 0.0396856 0.04451369 0.04125021 0.04304478 0.15287809] [0.0380348 0.04170533 0.03961716 0.03346401 0.04816649 0.16483868] [0.04238031 0.04803757 0.04985529 0.04450824 0.05366973 0.15653475] [0.04332682 0.04874725 0.05530819 0.05190786 0.05486845 0.1236018 ] [0.0380778 0.05125825 0.05096811 0.04958858 0.04822103 0.21819283] [0.04272466 0.048911 0.04946584 0.0468828 0.05410563 0.17398885] [0.041821 0.05093074 0.05614287 0.05339885 0.05296137 0.20092724] [0.04620967 0.05780881 0.05836852 0.0524601 0.05851907 0.19260544] [0.04590832 0.05475189 0.05853544 0.05676735 0.05813762 0.19880926]] [0.05012504 0.05214716 0.04651086 ... 0.9427279 0.94847291 0.91709675]
import pickle
filename = 'X_test'
outfile = open(filename,'wb')
pickle.dump(X_test,outfile)
outfile.close()
filename = 'y_test'
outfile = open(filename,'wb')
pickle.dump(y_test,outfile)
outfile.close()
filename = 'scaler_single'
outfile = open(filename,'wb')
pickle.dump(scaler_single,outfile)
outfile.close()
optimizer_p ='adam'
metrics_p=['mean_squared_error']
loss_p = 'mean_squared_error'
patience_p=10
monitor_p='loss'
epochs_p=2000
batch_size_p=128
outsize = 1
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras import optimizers
features = X_train.shape[2]
input_size= lookback
model = Sequential()
model.add(LSTM(units = input_size, activation = 'relu', return_sequences = True, input_shape = (X_train.shape[1], features)))
model.add(Dropout(0.2))
model.add(LSTM(units = 32, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(units = outsize))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= lstm (LSTM) (None, 64, 64) 18176 _________________________________________________________________ dropout (Dropout) (None, 64, 64) 0 _________________________________________________________________ lstm_1 (LSTM) (None, 32) 12416 _________________________________________________________________ dropout_1 (Dropout) (None, 32) 0 _________________________________________________________________ dense (Dense) (None, 1) 33 ================================================================= Total params: 30,625 Trainable params: 30,625 Non-trainable params: 0 _________________________________________________________________
model.compile(optimizer=optimizer_p, loss = loss_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = model.fit(X_train, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 4s 1ms/sample - loss: 0.0981 - mean_squared_error: 0.0981 Epoch 2/2000 3426/3426 [==============================] - 2s 707us/sample - loss: 0.0137 - mean_squared_error: 0.0137 Epoch 3/2000 3426/3426 [==============================] - 2s 711us/sample - loss: 0.0081 - mean_squared_error: 0.0081 Epoch 4/2000 3426/3426 [==============================] - 3s 732us/sample - loss: 0.0065 - mean_squared_error: 0.0065 Epoch 5/2000 3426/3426 [==============================] - 3s 737us/sample - loss: 0.0058 - mean_squared_error: 0.0058 - loss: 0.0058 - me Epoch 6/2000 3426/3426 [==============================] - 2s 719us/sample - loss: 0.0055 - mean_squared_error: 0.0055 - loss: 0 Epoch 7/2000 3426/3426 [==============================] - 3s 746us/sample - loss: 0.0053 - mean_squared_error: 0.0053 Epoch 8/2000 3426/3426 [==============================] - 2s 729us/sample - loss: 0.0047 - mean_squared_error: 0.0047 Epoch 9/2000 3426/3426 [==============================] - 3s 750us/sample - loss: 0.0047 - mean_squared_error: 0.0047 - loss: 0.0048 - Epoch 10/2000 3426/3426 [==============================] - 3s 758us/sample - loss: 0.0043 - mean_squared_error: 0.0043 Epoch 11/2000 3426/3426 [==============================] - 2s 699us/sample - loss: 0.0045 - mean_squared_error: 0.0045 Epoch 12/2000 3426/3426 [==============================] - 3s 730us/sample - loss: 0.0043 - mean_squared_error: 0.0043 Epoch 13/2000 3426/3426 [==============================] - 2s 692us/sample - loss: 0.0039 - mean_squared_error: 0.0039 Epoch 14/2000 3426/3426 [==============================] - 3s 732us/sample - loss: 0.0039 - mean_squared_error: 0.0039 - loss: 0.0039 - mean_squar Epoch 15/2000 3426/3426 [==============================] - 3s 738us/sample - loss: 0.0042 - mean_squared_error: 0.0042 Epoch 16/2000 3426/3426 [==============================] - 3s 762us/sample - loss: 0.0039 - mean_squared_error: 0.0039 Epoch 17/2000 3426/3426 [==============================] - 3s 740us/sample - loss: 0.0038 - mean_squared_error: 0.0038 Epoch 18/2000 3426/3426 [==============================] - 3s 746us/sample - loss: 0.0041 - mean_squared_error: 0.0041 Epoch 19/2000 3426/3426 [==============================] - 3s 781us/sample - loss: 0.0036 - mean_squared_error: 0.0036 Epoch 20/2000 3426/3426 [==============================] - 3s 753us/sample - loss: 0.0038 - mean_squared_error: 0.0038 Epoch 21/2000 3426/3426 [==============================] - 3s 731us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 22/2000 3426/3426 [==============================] - 3s 745us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 23/2000 3426/3426 [==============================] - 2s 712us/sample - loss: 0.0033 - mean_squared_error: 0.0033 Epoch 24/2000 3426/3426 [==============================] - 2s 708us/sample - loss: 0.0037 - mean_squared_error: 0.0037 Epoch 25/2000 3426/3426 [==============================] - 2s 714us/sample - loss: 0.0032 - mean_squared_error: 0.0032 Epoch 26/2000 3426/3426 [==============================] - 3s 736us/sample - loss: 0.0033 - mean_squared_error: 0.0033 Epoch 27/2000 3426/3426 [==============================] - 2s 716us/sample - loss: 0.0031 - mean_squared_error: 0.0031 Epoch 28/2000 3426/3426 [==============================] - 3s 734us/sample - loss: 0.0032 - mean_squared_error: 0.0032 Epoch 29/2000 3426/3426 [==============================] - 3s 751us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 30/2000 3426/3426 [==============================] - 2s 723us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 31/2000 3426/3426 [==============================] - 2s 676us/sample - loss: 0.0030 - mean_squared_error: 0.0030 Epoch 32/2000 3426/3426 [==============================] - 2s 691us/sample - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 33/2000 3426/3426 [==============================] - 2s 679us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 34/2000 3426/3426 [==============================] - 2s 709us/sample - loss: 0.0027 - mean_squared_error: 0.0027 - l Epoch 35/2000 3426/3426 [==============================] - 2s 697us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 36/2000 3426/3426 [==============================] - 2s 726us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 37/2000 3426/3426 [==============================] - 3s 758us/sample - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 38/2000 3426/3426 [==============================] - 2s 711us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 39/2000 3426/3426 [==============================] - 3s 781us/sample - loss: 0.0025 - mean_squared_error: 0.0025 - loss: 0.0025 - mean_squared_error: 0.00 Epoch 40/2000 3426/3426 [==============================] - 3s 760us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 41/2000 3426/3426 [==============================] - 3s 752us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 42/2000 3426/3426 [==============================] - 3s 752us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 43/2000 3426/3426 [==============================] - 3s 805us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 44/2000 3426/3426 [==============================] - 3s 767us/sample - loss: 0.0026 - mean_squared_error: 0.0026 - loss: 0.0025 - mean_squar - ETA: 0s - loss: 0.0025 - mean_s Epoch 45/2000 3426/3426 [==============================] - 3s 783us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 46/2000 3426/3426 [==============================] - 3s 776us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 47/2000 3426/3426 [==============================] - 3s 736us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 48/2000 3426/3426 [==============================] - 3s 731us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 49/2000 3426/3426 [==============================] - 3s 752us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 50/2000 3426/3426 [==============================] - 2s 714us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 51/2000 3426/3426 [==============================] - 3s 740us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 52/2000 3426/3426 [==============================] - 3s 745us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 53/2000 3426/3426 [==============================] - 2s 729us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 54/2000 3426/3426 [==============================] - 3s 745us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 55/2000 3426/3426 [==============================] - 2s 710us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 56/2000 3426/3426 [==============================] - 2s 728us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 57/2000 3426/3426 [==============================] - 2s 723us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 58/2000 3426/3426 [==============================] - 2s 707us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 59/2000 3426/3426 [==============================] - 2s 695us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 60/2000 3426/3426 [==============================] - 2s 705us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 61/2000 3426/3426 [==============================] - 2s 680us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 62/2000 3426/3426 [==============================] - 3s 739us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 63/2000 3426/3426 [==============================] - ETA: 0s - loss: 0.0018 - mean_squared_error: 0.00 - 3s 760us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 64/2000 3426/3426 [==============================] - 3s 756us/sample - loss: 0.0018 - mean_squared_error: 0.0018 - loss: 0.0018 - mean_squared_error: 0. Epoch 65/2000 3426/3426 [==============================] - 2s 723us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 66/2000 3426/3426 [==============================] - 3s 733us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 67/2000 3426/3426 [==============================] - 2s 719us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 68/2000 3426/3426 [==============================] - 2s 711us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 69/2000 3426/3426 [==============================] - 2s 688us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 70/2000 3426/3426 [==============================] - 2s 710us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 71/2000 3426/3426 [==============================] - 3s 749us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 72/2000 3426/3426 [==============================] - 3s 770us/sample - loss: 0.0015 - mean_squared_error: 0.0015 - los Epoch 73/2000 3426/3426 [==============================] - 2s 709us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 74/2000 3426/3426 [==============================] - 2s 704us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 75/2000 3426/3426 [==============================] - 2s 682us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 76/2000 3426/3426 [==============================] - 2s 694us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 77/2000 3426/3426 [==============================] - 2s 663us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 78/2000 3426/3426 [==============================] - 2s 706us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 79/2000 3426/3426 [==============================] - 2s 687us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 80/2000 3426/3426 [==============================] - 2s 670us/sample - loss: 0.0014 - mean_squared_error: 0.0014 - loss: 0.0014 - me Epoch 81/2000 3426/3426 [==============================] - 2s 722us/sample - loss: 0.0014 - mean_squared_error: 0.0014 - loss: 0.0014 - mean_squared_err Epoch 82/2000 3426/3426 [==============================] - 2s 681us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 83/2000 3426/3426 [==============================] - 2s 688us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 84/2000 3426/3426 [==============================] - 2s 695us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 85/2000 3426/3426 [==============================] - 2s 721us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 86/2000 3426/3426 [==============================] - 3s 734us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 87/2000 3426/3426 [==============================] - 3s 752us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 88/2000 3426/3426 [==============================] - 3s 765us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 89/2000 3426/3426 [==============================] - 3s 763us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 90/2000 3426/3426 [==============================] - 3s 815us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 91/2000 3426/3426 [==============================] - 3s 753us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 92/2000 3426/3426 [==============================] - 3s 771us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 93/2000 3426/3426 [==============================] - 3s 758us/sample - loss: 0.0012 - mean_squared_error: 0.0012 - loss: 0.0012 Epoch 94/2000 3426/3426 [==============================] - 3s 791us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 95/2000 3426/3426 [==============================] - 3s 813us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 96/2000 3426/3426 [==============================] - 2s 707us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 97/2000 3426/3426 [==============================] - 3s 737us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 98/2000 3426/3426 [==============================] - 3s 754us/sample - loss: 0.0013 - mean_squared_error: 0.0013 - loss: 0.0013 - mean_squar Epoch 99/2000 3426/3426 [==============================] - 2s 722us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 100/2000 3426/3426 [==============================] - 3s 738us/sample - loss: 0.0013 - mean_squared_error: 0.0013 - l Epoch 101/2000 3426/3426 [==============================] - 3s 742us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 102/2000 3426/3426 [==============================] - 3s 734us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 103/2000 3426/3426 [==============================] - 2s 716us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 104/2000 3426/3426 [==============================] - 2s 701us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 105/2000 3426/3426 [==============================] - 3s 730us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 106/2000 3426/3426 [==============================] - 2s 700us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 107/2000 3426/3426 [==============================] - 2s 693us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 108/2000 3426/3426 [==============================] - 2s 723us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 109/2000 3426/3426 [==============================] - 3s 730us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 110/2000 3426/3426 [==============================] - 2s 699us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 111/2000 3426/3426 [==============================] - 2s 694us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 112/2000 3426/3426 [==============================] - 2s 703us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 113/2000 3426/3426 [==============================] - 2s 675us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 114/2000 3426/3426 [==============================] - 2s 702us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 00114: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
from keras.models import load_model
#import keras
history.model.save('LSTM_1')
model = tf.keras.models.load_model('LSTM_1')
WARNING:tensorflow:From C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\tensorflow_core\python\ops\resource_variable_ops.py:1786: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version. Instructions for updating: If using Keras pass *_constraint arguments to layers. INFO:tensorflow:Assets written to: LSTM_1\assets
y_pred = model.predict(X_test)
error = [y_pred[i]-y_test[i] for i in range(len(y_pred))]
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print(mape)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
#columns = ['RMSE','MSE','MAE','MAPE','R2', 'error_abs_sum','rate of change acc']
df_result['LSTM'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 27.199957666139575 MSE 27.199957666139575 MAE 3.5466426556276525 MAPE 1.5007189531770335 R2 0.9891262839783559 total absolute error 5954.813018798828 rate of change accuracy 0.5375446960667462
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:70: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
y_pred_iv.reshape(-1, 1).shape
(1679, 1)
y_test_iv.shape
(1679, 1)
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras import optimizers
features = X_train.shape[2]
input_size= lookback
model = Sequential()
model.add(LSTM(units = input_size, activation = 'relu', return_sequences = True, input_shape = (X_train.shape[1], features)))
model.add(Dropout(0.2))
model.add(LSTM(units = 64, activation = 'relu', return_sequences = False))
model.add(Dropout(0.2))
model.add(Dense(units = outsize))
model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= lstm_2 (LSTM) (None, 64, 64) 18176 _________________________________________________________________ dropout_2 (Dropout) (None, 64, 64) 0 _________________________________________________________________ lstm_3 (LSTM) (None, 64) 33024 _________________________________________________________________ dropout_3 (Dropout) (None, 64) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 65 ================================================================= Total params: 51,265 Trainable params: 51,265 Non-trainable params: 0 _________________________________________________________________
model.compile(optimizer=optimizer_p, loss = loss_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = model.fit(X_train, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 4s 1ms/sample - loss: 0.0206 - mean_squared_error: 0.0206 Epoch 2/2000 3426/3426 [==============================] - 3s 799us/sample - loss: 0.0066 - mean_squared_error: 0.0066 Epoch 3/2000 3426/3426 [==============================] - 3s 774us/sample - loss: 0.0041 - mean_squared_error: 0.0041 Epoch 4/2000 3426/3426 [==============================] - 3s 773us/sample - loss: 0.0032 - mean_squared_error: 0.0032 Epoch 5/2000 3426/3426 [==============================] - 3s 843us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 6/2000 3426/3426 [==============================] - 3s 826us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 7/2000 3426/3426 [==============================] - 3s 853us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 8/2000 3426/3426 [==============================] - 3s 796us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 9/2000 3426/3426 [==============================] - 3s 818us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 10/2000 3426/3426 [==============================] - 3s 793us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 11/2000 3426/3426 [==============================] - 3s 761us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 12/2000 3426/3426 [==============================] - 3s 824us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 13/2000 3426/3426 [==============================] - 3s 747us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 14/2000 3426/3426 [==============================] - 3s 853us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 15/2000 3426/3426 [==============================] - 3s 834us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 16/2000 3426/3426 [==============================] - 3s 833us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 17/2000 3426/3426 [==============================] - 3s 855us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 18/2000 3426/3426 [==============================] - 3s 850us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 19/2000 3426/3426 [==============================] - 3s 780us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 20/2000 3426/3426 [==============================] - 3s 780us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 21/2000 3426/3426 [==============================] - 3s 799us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 22/2000 3426/3426 [==============================] - 3s 805us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 23/2000 3426/3426 [==============================] - 3s 796us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 24/2000 3426/3426 [==============================] - 3s 800us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 25/2000 3426/3426 [==============================] - 3s 807us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 26/2000 3426/3426 [==============================] - 3s 763us/sample - loss: 0.0017 - mean_squared_error: 0.0017 - loss: 0.0016 Epoch 27/2000 3426/3426 [==============================] - 3s 811us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 28/2000 3426/3426 [==============================] - 3s 817us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 29/2000 3426/3426 [==============================] - 3s 811us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 30/2000 3426/3426 [==============================] - 3s 791us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 31/2000 3426/3426 [==============================] - 3s 760us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 32/2000 3426/3426 [==============================] - 3s 759us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 33/2000 3426/3426 [==============================] - 3s 761us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 34/2000 3426/3426 [==============================] - 3s 793us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 35/2000 3426/3426 [==============================] - 3s 783us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 36/2000 3426/3426 [==============================] - 3s 806us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 37/2000 3426/3426 [==============================] - 3s 762us/sample - loss: 0.0013 - mean_squared_error: 0.0013 - loss: 0.0013 Epoch 38/2000 3426/3426 [==============================] - 3s 797us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 39/2000 3426/3426 [==============================] - 3s 793us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 40/2000 3426/3426 [==============================] - 3s 795us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 41/2000 3426/3426 [==============================] - 3s 797us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 42/2000 3426/3426 [==============================] - 3s 807us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 43/2000 3426/3426 [==============================] - 3s 821us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 44/2000 3426/3426 [==============================] - 3s 792us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 45/2000 3426/3426 [==============================] - 3s 805us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 46/2000 3426/3426 [==============================] - 3s 800us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 47/2000 3426/3426 [==============================] - 3s 796us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 48/2000 3426/3426 [==============================] - 3s 799us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 49/2000 3426/3426 [==============================] - 3s 770us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 50/2000 3426/3426 [==============================] - 3s 799us/sample - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 51/2000 3426/3426 [==============================] - 3s 853us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 52/2000 3426/3426 [==============================] - 3s 804us/sample - loss: 0.0013 - mean_squared_error: 0.0013 - loss: 0.0013 - mean_squar Epoch 53/2000 3426/3426 [==============================] - 3s 803us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 54/2000 3426/3426 [==============================] - 3s 751us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 55/2000 3426/3426 [==============================] - 3s 803us/sample - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 56/2000 3426/3426 [==============================] - 3s 784us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 57/2000 3426/3426 [==============================] - 3s 867us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 58/2000 3426/3426 [==============================] - 3s 772us/sample - loss: 0.0010 - mean_squared_error: 0.0010 ETA: 0s - loss: 0.0010 - mean_squared_error: 0. Epoch 59/2000 3426/3426 [==============================] - 3s 807us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 60/2000 3426/3426 [==============================] - 3s 817us/sample - loss: 0.0010 - mean_squared_error: 0.0010 Epoch 61/2000 3426/3426 [==============================] - 3s 810us/sample - loss: 9.5587e-04 - mean_squared_error: 9.5587e-04 - loss: 9.5911e-0 Epoch 62/2000 3426/3426 [==============================] - 2s 726us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 63/2000 3426/3426 [==============================] - 3s 742us/sample - loss: 9.8969e-04 - mean_squared_error: 9.8969e-04 Epoch 64/2000 3426/3426 [==============================] - 3s 730us/sample - loss: 9.9626e-04 - mean_squared_error: 9.9626e-04 Epoch 65/2000 3426/3426 [==============================] - 3s 780us/sample - loss: 9.7682e-04 - mean_squared_error: 9.7682e-04 - loss: 9.9287e-04 - mean_squared_error: 9. Epoch 66/2000 3426/3426 [==============================] - 3s 774us/sample - loss: 9.6493e-04 - mean_squared_error: 9.6493e-04 Epoch 67/2000 3426/3426 [==============================] - ETA: 0s - loss: 9.4484e-04 - mean_squared_error: 9.4484e-04 ETA: 2s - loss: 9 - ETA: 0s - loss: 9.4395e-04 - mean_squared_error: 9.43 - 2s 728us/sample - loss: 9.5085e-04 - mean_squared_error: 9.5085e-04 Epoch 68/2000 3426/3426 [==============================] - 3s 734us/sample - loss: 9.7595e-04 - mean_squared_error: 9.7595e-04 Epoch 69/2000 3426/3426 [==============================] - 2s 697us/sample - loss: 8.8673e-04 - mean_squared_error: 8.8673e-04 Epoch 70/2000 3426/3426 [==============================] - 2s 687us/sample - loss: 9.4128e-04 - mean_squared_error: 9.4128e-04 - loss: 9.4658e-04 - mean_squared_error: 9.4658e- Epoch 71/2000 3426/3426 [==============================] - 3s 730us/sample - loss: 9.5351e-04 - mean_squared_error: 9.5351e-04 Epoch 72/2000 3426/3426 [==============================] - 3s 731us/sample - loss: 9.4745e-04 - mean_squared_error: 9.4745e-04 Epoch 73/2000 3426/3426 [==============================] - 3s 736us/sample - loss: 9.5405e-04 - mean_squared_error: 9.5405e-04 Epoch 74/2000 3426/3426 [==============================] - 2s 708us/sample - loss: 9.3258e-04 - mean_squared_error: 9.3258e-04 Epoch 75/2000 3426/3426 [==============================] - 3s 783us/sample - loss: 9.1811e-04 - mean_squared_error: 9.1811e-04 Epoch 76/2000 3426/3426 [==============================] - 3s 783us/sample - loss: 8.6856e-04 - mean_squared_error: 8.6856e-04 Epoch 77/2000 3426/3426 [==============================] - 3s 794us/sample - loss: 9.8033e-04 - mean_squared_error: 9.8033e-04 Epoch 78/2000 3426/3426 [==============================] - 3s 818us/sample - loss: 9.6318e-04 - mean_squared_error: 9.6318e-04 Epoch 79/2000 3426/3426 [==============================] - 3s 805us/sample - loss: 8.5849e-04 - mean_squared_error: 8.5849e-04 Epoch 80/2000 3426/3426 [==============================] - 3s 813us/sample - loss: 8.9740e-04 - mean_squared_error: 8.9740e-04 Epoch 81/2000 3426/3426 [==============================] - 2s 716us/sample - loss: 8.8115e-04 - mean_squared_error: 8.8115e-04 Epoch 82/2000 3426/3426 [==============================] - 3s 771us/sample - loss: 9.6197e-04 - mean_squared_error: 9.6197e-04 Epoch 83/2000 3426/3426 [==============================] - 2s 728us/sample - loss: 8.9348e-04 - mean_squared_error: 8.9348e-04 Epoch 84/2000 3426/3426 [==============================] - 3s 785us/sample - loss: 8.9693e-04 - mean_squared_error: 8.9693e-04 Epoch 85/2000 3426/3426 [==============================] - 3s 767us/sample - loss: 8.6826e-04 - mean_squared_error: 8.6826e-04 Epoch 86/2000 3426/3426 [==============================] - 3s 815us/sample - loss: 8.1530e-04 - mean_squared_error: 8.1530e-04 Epoch 87/2000 3426/3426 [==============================] - 3s 803us/sample - loss: 8.4465e-04 - mean_squared_error: 8.4465e-04 Epoch 88/2000 3426/3426 [==============================] - 3s 822us/sample - loss: 8.5737e-04 - mean_squared_error: 8.5737e-04 - loss: 8.2104e Epoch 89/2000 3426/3426 [==============================] - 3s 811us/sample - loss: 9.1091e-04 - mean_squared_error: 9.1091e-04 Epoch 90/2000 3426/3426 [==============================] - 3s 801us/sample - loss: 8.5913e-04 - mean_squared_error: 8.5913e-04 - loss: 8.5432e-04 - mean_squared_e Epoch 91/2000 3426/3426 [==============================] - 3s 802us/sample - loss: 8.9375e-04 - mean_squared_error: 8.9375e-04 Epoch 92/2000 3426/3426 [==============================] - 3s 795us/sample - loss: 8.8228e-04 - mean_squared_error: 8.8228e-04 Epoch 93/2000 3426/3426 [==============================] - 3s 844us/sample - loss: 9.0575e-04 - mean_squared_error: 9.0575e-04 Epoch 94/2000 3426/3426 [==============================] - 3s 745us/sample - loss: 8.2316e-04 - mean_squared_error: 8.2316e-04 Epoch 95/2000 3426/3426 [==============================] - 2s 713us/sample - loss: 7.7035e-04 - mean_squared_error: 7.7035e-04 Epoch 96/2000 3426/3426 [==============================] - 3s 802us/sample - loss: 7.4718e-04 - mean_squared_error: 7.4718e-04 Epoch 97/2000 3426/3426 [==============================] - 3s 810us/sample - loss: 8.3461e-04 - mean_squared_error: 8.3461e-04 Epoch 98/2000 3426/3426 [==============================] - 3s 825us/sample - loss: 8.0590e-04 - mean_squared_error: 8.0590e-04 Epoch 99/2000 3426/3426 [==============================] - 3s 817us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 100/2000 3426/3426 [==============================] - 3s 794us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 101/2000 3426/3426 [==============================] - 2s 722us/sample - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 102/2000 3426/3426 [==============================] - 3s 804us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 103/2000 3426/3426 [==============================] - 3s 783us/sample - loss: 9.7898e-04 - mean_squared_error: 9.7898e-04 - loss: 9.7913e-04 - mean_squared_error: Epoch 104/2000 3426/3426 [==============================] - 3s 775us/sample - loss: 8.8696e-04 - mean_squared_error: 8.8696e-04 Epoch 105/2000 3426/3426 [==============================] - 3s 790us/sample - loss: 9.6234e-04 - mean_squared_error: 9.6234e-04 Epoch 106/2000 3426/3426 [==============================] - 3s 785us/sample - loss: 8.9515e-04 - mean_squared_error: 8.9515e-04 Epoch 00106: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
from keras.models import load_model
#import keras
history.model.save('LSTM_2')
model = tf.keras.models.load_model('LSTM_2')
INFO:tensorflow:Assets written to: LSTM_2\assets
X_test.shape
(1679, 64, 6)
y_pred = model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['LSTM_deeper'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 39.81846815284936 MSE 39.81846815284936 MAE 4.849728319032907 MAPE R2 0.9840817871694719 total absolute error 8142.69384765625 rate of change accuracy 0.5190703218116806
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:68: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
nn_model = Sequential()
nn_model.add(Dense(units=input_size, activation='relu', input_shape = (X_train.shape[1], features)))
nn_model.add(Dropout(0.2))
nn_model.add(Flatten())
nn_model.add(Dense(units = 128, activation='relu'))
nn_model.add(Dropout(0.2))
nn_model.add(Dense(units = 64, activation='relu'))
nn_model.add(Dropout(0.2))
nn_model.add(Dense(units = outsize))
nn_model.summary()
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_2 (Dense) (None, 64, 64) 448 _________________________________________________________________ dropout_4 (Dropout) (None, 64, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 4096) 0 _________________________________________________________________ dense_3 (Dense) (None, 128) 524416 _________________________________________________________________ dropout_5 (Dropout) (None, 128) 0 _________________________________________________________________ dense_4 (Dense) (None, 64) 8256 _________________________________________________________________ dropout_6 (Dropout) (None, 64) 0 _________________________________________________________________ dense_5 (Dense) (None, 1) 65 ================================================================= Total params: 533,185 Trainable params: 533,185 Non-trainable params: 0 _________________________________________________________________
nn_model.compile(optimizer=optimizer_p, loss = loss_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = nn_model.fit(X_train, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 1s 147us/sample - loss: 0.1026 - mean_squared_error: 0.1026 Epoch 2/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0198 - mean_squared_error: 0.0198 Epoch 3/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0126 - mean_squared_error: 0.0126 Epoch 4/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0094 - mean_squared_error: 0.0094 Epoch 5/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0084 - mean_squared_error: 0.0084 Epoch 6/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0069 - mean_squared_error: 0.0069 Epoch 7/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0063 - mean_squared_error: 0.0063 Epoch 8/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0062 - mean_squared_error: 0.0062 Epoch 9/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0059 - mean_squared_error: 0.0059 Epoch 10/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0054 - mean_squared_error: 0.0054 Epoch 11/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0051 - mean_squared_error: 0.0051 Epoch 12/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0050 - mean_squared_error: 0.0050 Epoch 13/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0050 - mean_squared_error: 0.0050 Epoch 14/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0051 - mean_squared_error: 0.0051 Epoch 15/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0045 - mean_squared_error: 0.0045 Epoch 16/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0043 - mean_squared_error: 0.0043 Epoch 17/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0040 - mean_squared_error: 0.0040 Epoch 18/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0044 - mean_squared_error: 0.0044 Epoch 19/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0040 - mean_squared_error: 0.0040 Epoch 20/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 0.0042 - mean_squared_error: 0.0042 Epoch 21/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0038 - mean_squared_error: 0.0038 Epoch 22/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0036 - mean_squared_error: 0.0036 Epoch 23/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0036 - mean_squared_error: 0.0036 Epoch 24/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0038 - mean_squared_error: 0.0038 Epoch 25/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0037 - mean_squared_error: 0.0037 Epoch 26/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0034 - mean_squared_error: 0.0034 Epoch 27/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0032 - mean_squared_error: 0.0032 Epoch 28/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 29/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0037 - mean_squared_error: 0.0037 Epoch 30/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0030 - mean_squared_error: 0.0030 Epoch 31/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 32/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0033 - mean_squared_error: 0.0033 Epoch 33/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0031 - mean_squared_error: 0.0031 Epoch 34/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 35/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 36/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 37/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 38/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 39/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 40/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 41/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 42/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 43/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 44/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 45/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 46/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 47/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 48/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 49/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 50/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 51/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 52/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 53/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 54/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 55/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 56/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 57/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 58/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 59/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 60/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 61/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 62/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 63/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 64/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 65/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 66/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 67/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 68/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 69/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 70/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 71/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 72/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 73/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 74/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 75/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 76/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 77/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 78/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 79/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 80/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 81/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 82/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 00082: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('DNN_1')
nn_model = tf.keras.models.load_model('DNN_1')
INFO:tensorflow:Assets written to: DNN_1\assets
y_pred = nn_model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['DNN'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 152.74172440144062 MSE 152.74172440144062 MAE 9.016800462951116 MAPE R2 0.9389385029130004 total absolute error 15139.207977294922 rate of change accuracy 0.5119189511323003
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:68: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
nn_model = Sequential()
nn_model.add(Dense(units=input_size, activation='relu', input_shape = (X_train.shape[1], features)))
nn_model.add(Dropout(0.2))
nn_model.add(Flatten())
nn_model.add(Dense(units = 128, activation='relu'))
nn_model.add(Dropout(0.2))
nn_model.add(Dense(units = 64, activation='relu'))
nn_model.add(Dropout(0.2))
nn_model.add(Dense(units = 64, activation='relu'))
nn_model.add(Dropout(0.2))
nn_model.add(Dense(units = outsize))
nn_model.summary()
Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_6 (Dense) (None, 64, 64) 448 _________________________________________________________________ dropout_7 (Dropout) (None, 64, 64) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_7 (Dense) (None, 128) 524416 _________________________________________________________________ dropout_8 (Dropout) (None, 128) 0 _________________________________________________________________ dense_8 (Dense) (None, 64) 8256 _________________________________________________________________ dropout_9 (Dropout) (None, 64) 0 _________________________________________________________________ dense_9 (Dense) (None, 64) 4160 _________________________________________________________________ dropout_10 (Dropout) (None, 64) 0 _________________________________________________________________ dense_10 (Dense) (None, 1) 65 ================================================================= Total params: 537,345 Trainable params: 537,345 Non-trainable params: 0 _________________________________________________________________
nn_model.compile(optimizer=optimizer_p, loss = loss_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = nn_model.fit(X_train, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 1s 216us/sample - loss: 0.0834 - mean_squared_error: 0.0834 Epoch 2/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0169 - mean_squared_error: 0.0169 Epoch 3/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0112 - mean_squared_error: 0.0112 Epoch 4/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0091 - mean_squared_error: 0.0091 Epoch 5/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0078 - mean_squared_error: 0.0078 Epoch 6/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0065 - mean_squared_error: 0.0065 Epoch 7/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0064 - mean_squared_error: 0.0064 Epoch 8/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0063 - mean_squared_error: 0.0063 Epoch 9/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0055 - mean_squared_error: 0.0055 Epoch 10/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0056 - mean_squared_error: 0.0056 Epoch 11/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0050 - mean_squared_error: 0.0050 Epoch 12/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0048 - mean_squared_error: 0.0048 Epoch 13/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0045 - mean_squared_error: 0.0045 Epoch 14/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0045 - mean_squared_error: 0.0045 Epoch 15/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0047 - mean_squared_error: 0.0047 Epoch 16/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0041 - mean_squared_error: 0.0041 Epoch 17/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0037 - mean_squared_error: 0.0037 Epoch 18/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0038 - mean_squared_error: 0.0038 Epoch 19/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0036 - mean_squared_error: 0.0036 Epoch 20/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0036 - mean_squared_error: 0.0036 Epoch 21/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0037 - mean_squared_error: 0.0037 Epoch 22/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 23/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0034 - mean_squared_error: 0.0034 Epoch 24/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0031 - mean_squared_error: 0.0031 Epoch 25/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0035 - mean_squared_error: 0.0035 Epoch 26/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0030 - mean_squared_error: 0.0030 Epoch 27/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0030 - mean_squared_error: 0.0030 Epoch 28/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 29/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0030 - mean_squared_error: 0.0030 Epoch 30/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 31/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 32/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 33/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 34/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 35/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 36/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 37/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 38/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 39/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 40/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 41/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 42/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0028 - mean_squared_error: 0.0028 Epoch 43/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 44/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0026 - mean_squared_error: 0.0026 Epoch 45/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 46/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 47/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0025 - mean_squared_error: 0.0025 Epoch 48/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 49/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 50/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 51/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 52/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 53/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0023 - mean_squared_error: 0.0023 Epoch 54/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 55/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 56/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 57/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 58/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 59/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 60/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 61/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 62/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0022 - mean_squared_error: 0.0022 Epoch 63/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 64/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 65/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 66/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 67/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 68/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 69/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 70/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 71/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 72/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 73/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 74/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 75/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 76/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 77/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 78/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 79/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 80/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 81/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 82/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0020 - mean_squared_error: 0.0020 Epoch 83/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 84/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 85/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 86/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 87/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 88/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 89/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 90/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 91/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 92/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 93/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 94/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 95/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 96/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 97/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 98/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 99/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 100/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 101/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 102/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 103/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 104/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 105/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 106/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 107/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 108/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 109/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 110/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 111/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 112/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 113/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 114/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 115/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 116/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 117/2000 3426/3426 [==============================] - 0s 27us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 118/2000 3426/3426 [==============================] - 0s 29us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 119/2000 3426/3426 [==============================] - 0s 28us/sample - loss: 0.0017 - mean_squared_error: 0.0017 Epoch 00119: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('DNN_2')
nn_model = tf.keras.models.load_model('DNN_2')
INFO:tensorflow:Assets written to: DNN_2\assets
y_pred = nn_model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['DNN_deeper'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 557.8070222141777 MSE 557.8070222141777 MAE 17.54747221028258 MAPE R2 0.7770057134321726 total absolute error 29462.205841064453 rate of change accuracy 0.5053635280095352
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:67: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
np.sqrt(64)
8.0
#X_train, y_train = np.array(X_train), np.array(y_train)
print(X_train.shape)
sqrval= int(np.sqrt(X_train.shape[-2]*X_train.shape[-1]))
sl = int(np.sqrt(lookback))
X_train_sqr = np.reshape(X_train,(y_train.shape[0],sl,sl,6))
print(X_train_sqr.shape)
print(y_train.shape)
#X_train_sqr[0]
(3426, 64, 6) (3426, 8, 8, 6) (3426,)
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
cnn_model = models.Sequential()
cnn_model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(sl, sl, 6)))
cnn_model.add(layers.MaxPooling2D((2, 2)))
cnn_model.add(layers.Conv2D(64, (3, 3), activation='relu'))
cnn_model.add(layers.Flatten())
cnn_model.add(layers.Dense(64, activation='relu'))
cnn_model.add(layers.Dense(outsize))
cnn_model.summary()
Model: "sequential_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 6, 6, 32) 1760 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 3, 3, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 1, 1, 64) 18496 _________________________________________________________________ flatten_2 (Flatten) (None, 64) 0 _________________________________________________________________ dense_11 (Dense) (None, 64) 4160 _________________________________________________________________ dense_12 (Dense) (None, 1) 65 ================================================================= Total params: 24,481 Trainable params: 24,481 Non-trainable params: 0 _________________________________________________________________
cnn_model.compile(loss=loss_p, optimizer=optimizer_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = cnn_model.fit(X_train_sqr, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 1s 327us/sample - loss: 0.0278 - mean_squared_error: 0.0278 Epoch 2/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 3/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 4/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 8.8297e-04 - mean_squared_error: 8.8297e-04 Epoch 5/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 7.7451e-04 - mean_squared_error: 7.7451e-04 Epoch 6/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 6.6140e-04 - mean_squared_error: 6.6140e-04 Epoch 7/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 5.9879e-04 - mean_squared_error: 5.9879e-04 Epoch 8/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 6.1624e-04 - mean_squared_error: 6.1624e-04 Epoch 9/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 5.7503e-04 - mean_squared_error: 5.7503e-04 Epoch 10/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 5.4320e-04 - mean_squared_error: 5.4320e-04 Epoch 11/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 5.2084e-04 - mean_squared_error: 5.2084e-04 Epoch 12/2000 3426/3426 [==============================] - 0s 21us/sample - loss: 5.3415e-04 - mean_squared_error: 5.3415e-04 Epoch 13/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.7690e-04 - mean_squared_error: 4.7690e-04 Epoch 14/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 5.1453e-04 - mean_squared_error: 5.1453e-04 Epoch 15/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 4.8072e-04 - mean_squared_error: 4.8072e-04 Epoch 16/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 5.0150e-04 - mean_squared_error: 5.0150e-04 Epoch 17/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 6.4139e-04 - mean_squared_error: 6.4139e-04 Epoch 18/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 4.9730e-04 - mean_squared_error: 4.9730e-04 Epoch 19/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.5605e-04 - mean_squared_error: 4.5605e-04 Epoch 20/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.6448e-04 - mean_squared_error: 4.6448e-04 Epoch 21/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 5.1585e-04 - mean_squared_error: 5.1585e-04 Epoch 22/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.7993e-04 - mean_squared_error: 4.7993e-04 Epoch 23/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.1353e-04 - mean_squared_error: 4.1353e-04 Epoch 24/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.4128e-04 - mean_squared_error: 4.4128e-04 Epoch 25/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.6657e-04 - mean_squared_error: 4.6657e-04 Epoch 26/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.5962e-04 - mean_squared_error: 4.5962e-04 Epoch 27/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 4.4483e-04 - mean_squared_error: 4.4483e-04 Epoch 28/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.0541e-04 - mean_squared_error: 4.0541e-04 Epoch 29/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.0727e-04 - mean_squared_error: 4.0727e-04 Epoch 30/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.0154e-04 - mean_squared_error: 4.0154e-04 Epoch 31/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.3413e-04 - mean_squared_error: 4.3413e-04 Epoch 32/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.9085e-04 - mean_squared_error: 3.9085e-04 Epoch 33/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 3.7459e-04 - mean_squared_error: 3.7459e-04 Epoch 34/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.8722e-04 - mean_squared_error: 3.8722e-04 Epoch 35/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 3.7239e-04 - mean_squared_error: 3.7239e-04 Epoch 36/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.7615e-04 - mean_squared_error: 3.7615e-04 Epoch 37/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.8656e-04 - mean_squared_error: 3.8656e-04 Epoch 38/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.9894e-04 - mean_squared_error: 3.9894e-04 Epoch 39/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.3186e-04 - mean_squared_error: 4.3186e-04 Epoch 40/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.7925e-04 - mean_squared_error: 3.7925e-04 Epoch 41/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 3.7773e-04 - mean_squared_error: 3.7773e-04 Epoch 42/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.6920e-04 - mean_squared_error: 3.6920e-04 Epoch 43/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.9929e-04 - mean_squared_error: 3.9929e-04 Epoch 44/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.6259e-04 - mean_squared_error: 3.6259e-04 Epoch 45/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.2741e-04 - mean_squared_error: 4.2741e-04 Epoch 46/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 4.5359e-04 - mean_squared_error: 4.5359e-04 Epoch 47/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.7135e-04 - mean_squared_error: 3.7135e-04 Epoch 48/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 3.8083e-04 - mean_squared_error: 3.8083e-04 Epoch 49/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.5486e-04 - mean_squared_error: 4.5486e-04 Epoch 50/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.5078e-04 - mean_squared_error: 4.5078e-04 Epoch 51/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.8475e-04 - mean_squared_error: 3.8475e-04 Epoch 52/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.5168e-04 - mean_squared_error: 3.5168e-04 Epoch 53/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 4.2950e-04 - mean_squared_error: 4.2950e-04 Epoch 54/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 3.9989e-04 - mean_squared_error: 3.9989e-04 Epoch 55/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.7491e-04 - mean_squared_error: 3.7491e-04 Epoch 56/2000 3426/3426 [==============================] - 0s 26us/sample - loss: 3.7130e-04 - mean_squared_error: 3.7130e-04 Epoch 57/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.1857e-04 - mean_squared_error: 3.1857e-04 Epoch 58/2000 3426/3426 [==============================] - 0s 21us/sample - loss: 3.6659e-04 - mean_squared_error: 3.6659e-04 Epoch 59/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 4.1392e-04 - mean_squared_error: 4.1392e-04 Epoch 60/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.3906e-04 - mean_squared_error: 3.3906e-04 Epoch 61/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.1825e-04 - mean_squared_error: 3.1825e-04 Epoch 62/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.2443e-04 - mean_squared_error: 3.2443e-04 Epoch 63/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.2787e-04 - mean_squared_error: 3.2787e-04 Epoch 64/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.5609e-04 - mean_squared_error: 3.5609e-04 Epoch 65/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 3.1959e-04 - mean_squared_error: 3.1959e-04 Epoch 66/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 3.1061e-04 - mean_squared_error: 3.1061e-04 Epoch 67/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.1209e-04 - mean_squared_error: 3.1209e-04 Epoch 68/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.7749e-04 - mean_squared_error: 3.7749e-04 Epoch 69/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.8219e-04 - mean_squared_error: 3.8219e-04 Epoch 70/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.8765e-04 - mean_squared_error: 3.8765e-04 Epoch 71/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.1923e-04 - mean_squared_error: 3.1923e-04 Epoch 72/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.1592e-04 - mean_squared_error: 3.1592e-04 Epoch 73/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.8673e-04 - mean_squared_error: 3.8673e-04 Epoch 74/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.4136e-04 - mean_squared_error: 3.4136e-04 Epoch 75/2000 3426/3426 [==============================] - 0s 24us/sample - loss: 3.4776e-04 - mean_squared_error: 3.4776e-04 Epoch 76/2000 3426/3426 [==============================] - 0s 25us/sample - loss: 2.9865e-04 - mean_squared_error: 2.9865e-04 Epoch 77/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.0367e-04 - mean_squared_error: 3.0367e-04 Epoch 78/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 3.2538e-04 - mean_squared_error: 3.2538e-04 Epoch 79/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.0952e-04 - mean_squared_error: 3.0952e-04 Epoch 80/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.0299e-04 - mean_squared_error: 3.0299e-04 Epoch 81/2000 3426/3426 [==============================] - 0s 22us/sample - loss: 3.4989e-04 - mean_squared_error: 3.4989e-04 Epoch 82/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.4381e-04 - mean_squared_error: 3.4381e-04 Epoch 83/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 2.9941e-04 - mean_squared_error: 2.9941e-04 Epoch 84/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.3047e-04 - mean_squared_error: 3.3047e-04 Epoch 85/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.5142e-04 - mean_squared_error: 3.5142e-04 Epoch 86/2000 3426/3426 [==============================] - 0s 23us/sample - loss: 3.2615e-04 - mean_squared_error: 3.2615e-04 Epoch 00086: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('CNN_1')
cnn_model = tf.keras.models.load_model('CNN_1')
INFO:tensorflow:Assets written to: CNN_1\assets
#X_test, y_test = np.array(X_test), np.array(y_test)
X_test_sqr = np.reshape(X_test,(y_test.shape[0],sl,sl,6))
X_test_sqr.shape, y_test.shape
### predict the testset andd visulize the result
y_pred = cnn_model.predict(X_test_sqr)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['CNN'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 40.291056657776885 MSE 40.291056657776885 MAE 4.422975141424164 MAPE R2 0.9838928606549254 total absolute error 7426.175262451172 rate of change accuracy 0.4940405244338498
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:77: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
cnn_model = models.Sequential()
cnn_model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(sl, sl, 6)))
cnn_model.add(layers.Conv2D(64, (3, 3), activation='relu'))
cnn_model.add(layers.Conv2D(64, (2, 2), activation='relu'))
cnn_model.add(layers.Conv2D(64, (2, 2), activation='relu'))
cnn_model.add(layers.MaxPooling2D((1, 1)))
cnn_model.add(layers.Flatten())
cnn_model.add(layers.Dense(64, activation='relu'))
cnn_model.add(layers.Dense(32, activation='relu'))
cnn_model.add(layers.Dense(outsize))
cnn_model.summary()
Model: "sequential_5" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_2 (Conv2D) (None, 6, 6, 32) 1760 _________________________________________________________________ conv2d_3 (Conv2D) (None, 4, 4, 64) 18496 _________________________________________________________________ conv2d_4 (Conv2D) (None, 3, 3, 64) 16448 _________________________________________________________________ conv2d_5 (Conv2D) (None, 2, 2, 64) 16448 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 2, 2, 64) 0 _________________________________________________________________ flatten_3 (Flatten) (None, 256) 0 _________________________________________________________________ dense_13 (Dense) (None, 64) 16448 _________________________________________________________________ dense_14 (Dense) (None, 32) 2080 _________________________________________________________________ dense_15 (Dense) (None, 1) 33 ================================================================= Total params: 71,713 Trainable params: 71,713 Non-trainable params: 0 _________________________________________________________________
cnn_model.compile(loss=loss_p, optimizer=optimizer_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=True)
history = cnn_model.fit(X_train_sqr, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Train on 3426 samples Epoch 1/2000 3426/3426 [==============================] - 1s 191us/sample - loss: 0.0301 - mean_squared_error: 0.0301 Epoch 2/2000 3426/3426 [==============================] - 0s 34us/sample - loss: 0.0052 - mean_squared_error: 0.0052 Epoch 3/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 4/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 5/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 7.0604e-04 - mean_squared_error: 7.0604e-04 Epoch 6/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 7.1799e-04 - mean_squared_error: 7.1799e-04 Epoch 7/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 4.9354e-04 - mean_squared_error: 4.9354e-04 Epoch 8/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 5.1016e-04 - mean_squared_error: 5.1016e-04 Epoch 9/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 5.3139e-04 - mean_squared_error: 5.3139e-04 Epoch 10/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 3.9824e-04 - mean_squared_error: 3.9824e-04 Epoch 11/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 4.4295e-04 - mean_squared_error: 4.4295e-04 Epoch 12/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 3.9218e-04 - mean_squared_error: 3.9218e-04 Epoch 13/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.9423e-04 - mean_squared_error: 3.9423e-04 Epoch 14/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 3.7275e-04 - mean_squared_error: 3.7274e-04 Epoch 15/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 4.1002e-04 - mean_squared_error: 4.1002e-04 Epoch 16/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 3.4940e-04 - mean_squared_error: 3.4940e-04 Epoch 17/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 3.5901e-04 - mean_squared_error: 3.5901e-04 Epoch 18/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 3.3938e-04 - mean_squared_error: 3.3938e-04 Epoch 19/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.9515e-04 - mean_squared_error: 3.9515e-04 Epoch 20/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.2575e-04 - mean_squared_error: 3.2575e-04 Epoch 21/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.7905e-04 - mean_squared_error: 3.7905e-04 Epoch 22/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 4.6419e-04 - mean_squared_error: 4.6419e-04 Epoch 23/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 4.5152e-04 - mean_squared_error: 4.5152e-04 Epoch 24/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 4.0140e-04 - mean_squared_error: 4.0140e-04 Epoch 25/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.8211e-04 - mean_squared_error: 2.8211e-04 Epoch 26/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 3.3182e-04 - mean_squared_error: 3.3182e-04 Epoch 27/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.7863e-04 - mean_squared_error: 2.7863e-04 Epoch 28/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 4.4069e-04 - mean_squared_error: 4.4069e-04 Epoch 29/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.8615e-04 - mean_squared_error: 2.8615e-04 Epoch 30/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 3.0269e-04 - mean_squared_error: 3.0269e-04 Epoch 31/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.9070e-04 - mean_squared_error: 2.9070e-04 Epoch 32/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.7216e-04 - mean_squared_error: 2.7216e-04 Epoch 33/2000 3426/3426 [==============================] - 0s 34us/sample - loss: 3.5605e-04 - mean_squared_error: 3.5605e-04 Epoch 34/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 4.5045e-04 - mean_squared_error: 4.5045e-04 Epoch 35/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 4.6361e-04 - mean_squared_error: 4.6361e-04 Epoch 36/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.6853e-04 - mean_squared_error: 2.6853e-04 Epoch 37/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.9500e-04 - mean_squared_error: 2.9500e-04 Epoch 38/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.9051e-04 - mean_squared_error: 2.9051e-04 Epoch 39/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 3.1429e-04 - mean_squared_error: 3.1429e-04 Epoch 40/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.6479e-04 - mean_squared_error: 2.6479e-04 Epoch 41/2000 3426/3426 [==============================] - 0s 34us/sample - loss: 2.4122e-04 - mean_squared_error: 2.4122e-04 Epoch 42/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.8440e-04 - mean_squared_error: 2.8440e-04 Epoch 43/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.3951e-04 - mean_squared_error: 2.3951e-04 Epoch 44/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.4185e-04 - mean_squared_error: 2.4185e-04 Epoch 45/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.3826e-04 - mean_squared_error: 2.3826e-04 Epoch 46/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.9196e-04 - mean_squared_error: 2.9196e-04 Epoch 47/2000 3426/3426 [==============================] - 0s 35us/sample - loss: 2.9947e-04 - mean_squared_error: 2.9947e-04 Epoch 48/2000 3426/3426 [==============================] - 0s 34us/sample - loss: 3.5614e-04 - mean_squared_error: 3.5614e-04 Epoch 49/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.2847e-04 - mean_squared_error: 2.2847e-04 Epoch 50/2000 3426/3426 [==============================] - 0s 34us/sample - loss: 3.0326e-04 - mean_squared_error: 3.0326e-04 Epoch 51/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.8614e-04 - mean_squared_error: 2.8614e-04 Epoch 52/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.3202e-04 - mean_squared_error: 3.3202e-04 Epoch 53/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.6467e-04 - mean_squared_error: 2.6467e-04 Epoch 54/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.5458e-04 - mean_squared_error: 2.5458e-04 Epoch 55/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.6752e-04 - mean_squared_error: 2.6752e-04 Epoch 56/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.1644e-04 - mean_squared_error: 2.1644e-04 Epoch 57/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.3486e-04 - mean_squared_error: 2.3486e-04 Epoch 58/2000 3426/3426 [==============================] - 0s 33us/sample - loss: 2.4066e-04 - mean_squared_error: 2.4066e-04 Epoch 59/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.1688e-04 - mean_squared_error: 2.1688e-04 Epoch 60/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 2.4598e-04 - mean_squared_error: 2.4598e-04 Epoch 61/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.4182e-04 - mean_squared_error: 2.4182e-04 Epoch 62/2000 3426/3426 [==============================] - 0s 30us/sample - loss: 2.5808e-04 - mean_squared_error: 2.5808e-04 Epoch 63/2000 3426/3426 [==============================] - 0s 32us/sample - loss: 3.4322e-04 - mean_squared_error: 3.4322e-04 Epoch 64/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.3267e-04 - mean_squared_error: 2.3267e-04 Epoch 65/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.4694e-04 - mean_squared_error: 2.4694e-04 Epoch 66/2000 3426/3426 [==============================] - 0s 31us/sample - loss: 2.2926e-04 - mean_squared_error: 2.2926e-04 Epoch 00066: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('CNN_2')
cnn_model = tf.keras.models.load_model('CNN_2')
INFO:tensorflow:Assets written to: CNN_2\assets
#X_test, y_test = np.array(X_test), np.array(y_test)
X_test_sqr = np.reshape(X_test,(y_test.shape[0],sl,sl,6))
X_test_sqr.shape, y_test.shape
### predict the testset andd visulize the result
y_pred = cnn_model.predict(X_test_sqr)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['CNN_deeper'] =[rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
RMSE 36.471789347354 MSE 36.471789347354 MAE 4.376383505952436 MAPE R2 0.9854196875953948 total absolute error 7347.947906494141 rate of change accuracy 0.49880810488676997
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:75: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
def get_resnet_model(categories=2):
def residual_block(X, kernels, stride):
out = keras.layers.Conv1D(kernels, stride, padding='same')(X)
out = keras.layers.ReLU()(out)
out = keras.layers.Conv1D(kernels, stride, padding='same')(out)
out = keras.layers.add([X, out])
out = keras.layers.ReLU()(out)
#out = keras.layers.MaxPool1D(5, 2)(out)
return out
kernels = 32
stride = 5
inputs = keras.layers.Input([lookback,6])
X = keras.layers.Conv1D(kernels, stride)(inputs)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = keras.layers.Flatten()(X)
X = keras.layers.Dense(32, activation='relu')(X)
X = keras.layers.Dense(32, activation='relu')(X)
output = keras.layers.Dense(outsize)(X)
model = keras.Model(inputs=inputs, outputs=output)
return model
import keras
optimizer = keras.optimizers.Adam(lr=0.001)
model = get_resnet_model()
model.summary()
#model.fit(X_train_sqr, y_train, epochs=1000, batch_size=128, callbacks=[early_stop])
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 64, 6) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, 60, 32) 992 input_1[0][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, 60, 32) 5152 conv1d_1[0][0]
__________________________________________________________________________________________________
re_lu_1 (ReLU) (None, 60, 32) 0 conv1d_2[0][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, 60, 32) 5152 re_lu_1[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 60, 32) 0 conv1d_1[0][0]
conv1d_3[0][0]
__________________________________________________________________________________________________
re_lu_2 (ReLU) (None, 60, 32) 0 add_1[0][0]
__________________________________________________________________________________________________
conv1d_4 (Conv1D) (None, 60, 32) 5152 re_lu_2[0][0]
__________________________________________________________________________________________________
re_lu_3 (ReLU) (None, 60, 32) 0 conv1d_4[0][0]
__________________________________________________________________________________________________
conv1d_5 (Conv1D) (None, 60, 32) 5152 re_lu_3[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 60, 32) 0 re_lu_2[0][0]
conv1d_5[0][0]
__________________________________________________________________________________________________
re_lu_4 (ReLU) (None, 60, 32) 0 add_2[0][0]
__________________________________________________________________________________________________
conv1d_6 (Conv1D) (None, 60, 32) 5152 re_lu_4[0][0]
__________________________________________________________________________________________________
re_lu_5 (ReLU) (None, 60, 32) 0 conv1d_6[0][0]
__________________________________________________________________________________________________
conv1d_7 (Conv1D) (None, 60, 32) 5152 re_lu_5[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 60, 32) 0 re_lu_4[0][0]
conv1d_7[0][0]
__________________________________________________________________________________________________
re_lu_6 (ReLU) (None, 60, 32) 0 add_3[0][0]
__________________________________________________________________________________________________
conv1d_8 (Conv1D) (None, 60, 32) 5152 re_lu_6[0][0]
__________________________________________________________________________________________________
re_lu_7 (ReLU) (None, 60, 32) 0 conv1d_8[0][0]
__________________________________________________________________________________________________
conv1d_9 (Conv1D) (None, 60, 32) 5152 re_lu_7[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 60, 32) 0 re_lu_6[0][0]
conv1d_9[0][0]
__________________________________________________________________________________________________
re_lu_8 (ReLU) (None, 60, 32) 0 add_4[0][0]
__________________________________________________________________________________________________
conv1d_10 (Conv1D) (None, 60, 32) 5152 re_lu_8[0][0]
__________________________________________________________________________________________________
re_lu_9 (ReLU) (None, 60, 32) 0 conv1d_10[0][0]
__________________________________________________________________________________________________
conv1d_11 (Conv1D) (None, 60, 32) 5152 re_lu_9[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 60, 32) 0 re_lu_8[0][0]
conv1d_11[0][0]
__________________________________________________________________________________________________
re_lu_10 (ReLU) (None, 60, 32) 0 add_5[0][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 1920) 0 re_lu_10[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 32) 61472 flatten_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 32) 1056 dense_1[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 33 dense_2[0][0]
==================================================================================================
Total params: 115,073
Trainable params: 115,073
Non-trainable params: 0
__________________________________________________________________________________________________
model.compile(loss=loss_p, optimizer=optimizer_p, metrics=metrics_p)
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=1)
history = model.fit(X_train, y_train, epochs=epochs_p, batch_size=batch_size_p, callbacks=[early_stop])
Epoch 1/2000 3426/3426 [==============================] - 1s 239us/step - loss: 0.0422 - mean_squared_error: 0.0422 Epoch 2/2000 3426/3426 [==============================] - 0s 104us/step - loss: 0.0014 - mean_squared_error: 0.0014 Epoch 3/2000 3426/3426 [==============================] - 0s 91us/step - loss: 7.8649e-04 - mean_squared_error: 7.8649e-04 Epoch 4/2000 3426/3426 [==============================] - 0s 104us/step - loss: 6.2550e-04 - mean_squared_error: 6.2550e-04 Epoch 5/2000 3426/3426 [==============================] - 0s 99us/step - loss: 4.7723e-04 - mean_squared_error: 4.7723e-04 Epoch 6/2000 3426/3426 [==============================] - 0s 96us/step - loss: 6.4197e-04 - mean_squared_error: 6.4197e-04 Epoch 7/2000 3426/3426 [==============================] - 0s 101us/step - loss: 4.1778e-04 - mean_squared_error: 4.1778e-04 Epoch 8/2000 3426/3426 [==============================] - 0s 97us/step - loss: 5.7194e-04 - mean_squared_error: 5.7194e-04 Epoch 9/2000 3426/3426 [==============================] - 0s 97us/step - loss: 3.8858e-04 - mean_squared_error: 3.8858e-04 Epoch 10/2000 3426/3426 [==============================] - 0s 97us/step - loss: 4.9388e-04 - mean_squared_error: 4.9388e-04 Epoch 11/2000 3426/3426 [==============================] - 0s 99us/step - loss: 3.3402e-04 - mean_squared_error: 3.3402e-04 Epoch 12/2000 3426/3426 [==============================] - 0s 99us/step - loss: 3.7473e-04 - mean_squared_error: 3.7473e-04 Epoch 13/2000 3426/3426 [==============================] - 0s 102us/step - loss: 4.4280e-04 - mean_squared_error: 4.4280e-04 Epoch 14/2000 3426/3426 [==============================] - 0s 106us/step - loss: 5.0883e-04 - mean_squared_error: 5.0883e-04 Epoch 15/2000 3426/3426 [==============================] - 0s 102us/step - loss: 3.2850e-04 - mean_squared_error: 3.2850e-04 Epoch 16/2000 3426/3426 [==============================] - 0s 93us/step - loss: 4.2885e-04 - mean_squared_error: 4.2885e-04 Epoch 17/2000 3426/3426 [==============================] - 0s 96us/step - loss: 3.9038e-04 - mean_squared_error: 3.9038e-04 Epoch 18/2000 3426/3426 [==============================] - 0s 98us/step - loss: 3.0591e-04 - mean_squared_error: 3.0591e-04 Epoch 19/2000 3426/3426 [==============================] - 0s 108us/step - loss: 2.9896e-04 - mean_squared_error: 2.9896e-04 Epoch 20/2000 3426/3426 [==============================] - 0s 104us/step - loss: 2.9308e-04 - mean_squared_error: 2.9308e-04 Epoch 21/2000 3426/3426 [==============================] - 0s 90us/step - loss: 3.3804e-04 - mean_squared_error: 3.3804e-04 Epoch 22/2000 3426/3426 [==============================] - 0s 91us/step - loss: 2.9723e-04 - mean_squared_error: 2.9723e-04 Epoch 23/2000 3426/3426 [==============================] - 0s 100us/step - loss: 4.9446e-04 - mean_squared_error: 4.9446e-04 Epoch 24/2000 3426/3426 [==============================] - 0s 92us/step - loss: 4.0349e-04 - mean_squared_error: 4.0349e-04 Epoch 25/2000 3426/3426 [==============================] - 0s 94us/step - loss: 3.0166e-04 - mean_squared_error: 3.0166e-04 Epoch 26/2000 3426/3426 [==============================] - 0s 105us/step - loss: 3.8007e-04 - mean_squared_error: 3.8007e-04 Epoch 27/2000 3426/3426 [==============================] - 0s 108us/step - loss: 3.9451e-04 - mean_squared_error: 3.9451e-04 Epoch 28/2000 3426/3426 [==============================] - 0s 101us/step - loss: 2.6394e-04 - mean_squared_error: 2.6394e-04 Epoch 29/2000 3426/3426 [==============================] - 0s 94us/step - loss: 2.2187e-04 - mean_squared_error: 2.2187e-04 Epoch 30/2000 3426/3426 [==============================] - 0s 107us/step - loss: 2.1766e-04 - mean_squared_error: 2.1766e-04 Epoch 31/2000 3426/3426 [==============================] - 0s 98us/step - loss: 2.3520e-04 - mean_squared_error: 2.3520e-04 Epoch 32/2000 3426/3426 [==============================] - 0s 98us/step - loss: 2.6230e-04 - mean_squared_error: 2.6230e-04 Epoch 33/2000 3426/3426 [==============================] - 0s 100us/step - loss: 3.3684e-04 - mean_squared_error: 3.3684e-04 Epoch 34/2000 3426/3426 [==============================] - 0s 93us/step - loss: 3.5852e-04 - mean_squared_error: 3.5852e-04 Epoch 35/2000 3426/3426 [==============================] - 0s 100us/step - loss: 2.7699e-04 - mean_squared_error: 2.7699e-04 Epoch 36/2000 3426/3426 [==============================] - 0s 99us/step - loss: 4.3278e-04 - mean_squared_error: 4.3278e-04 Epoch 37/2000 3426/3426 [==============================] - 0s 100us/step - loss: 2.6744e-04 - mean_squared_error: 2.6744e-04 Epoch 38/2000 3426/3426 [==============================] - 0s 98us/step - loss: 3.4091e-04 - mean_squared_error: 3.4091e-04 Epoch 39/2000 3426/3426 [==============================] - 0s 100us/step - loss: 2.4591e-04 - mean_squared_error: 2.4591e-04 Epoch 40/2000 3426/3426 [==============================] - 0s 98us/step - loss: 2.1929e-04 - mean_squared_error: 2.1929e-04 Epoch 00040: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][10:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('RES_1')
model = tf.keras.models.load_model('RES_1')
y_pred = model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['ResNet_5_layer'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
#df_result['ResNet_5_layer'] = [rmse,mae,r2,tae]
RMSE 31.18896346601828 MSE 31.18896346601828 MAE 3.993054351897521 MAPE R2 0.9875316007509416 total absolute error 6704.3382568359375 rate of change accuracy 0.5143027413587604
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:67: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
def get_resnet_model(categories=2):
def residual_block(X, kernels, stride):
out = keras.layers.Conv1D(kernels, stride, padding='same')(X)
out = keras.layers.ReLU()(out)
out = keras.layers.Conv1D(kernels, stride, padding='same')(out)
out = keras.layers.add([X, out])
out = keras.layers.ReLU()(out)
out = keras.layers.MaxPool1D(5, 2)(out)
return out
kernels = 32
stride = 5
inputs = keras.layers.Input([lookback,6])
X = keras.layers.Conv1D(kernels, stride)(inputs)
X = residual_block(X, kernels, stride)
#X = keras.layers.MaxPool1D(5, 2)(X)
X = keras.layers.Flatten()(X)
X = keras.layers.Dense(64, activation='relu')(X)
X = keras.layers.Dense(32, activation='relu')(X)
output = keras.layers.Dense(outsize)(X)
model = keras.Model(inputs=inputs, outputs=output)
return model
import keras
optimizer = keras.optimizers.Adam(lr=0.001)
model = get_resnet_model()
model.summary()
#model.fit(X_train_sqr, y_train, epochs=1000, batch_size=128, callbacks=[early_stop])
Model: "model_2"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) (None, 64, 6) 0
__________________________________________________________________________________________________
conv1d_12 (Conv1D) (None, 60, 32) 992 input_2[0][0]
__________________________________________________________________________________________________
conv1d_13 (Conv1D) (None, 60, 32) 5152 conv1d_12[0][0]
__________________________________________________________________________________________________
re_lu_11 (ReLU) (None, 60, 32) 0 conv1d_13[0][0]
__________________________________________________________________________________________________
conv1d_14 (Conv1D) (None, 60, 32) 5152 re_lu_11[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 60, 32) 0 conv1d_12[0][0]
conv1d_14[0][0]
__________________________________________________________________________________________________
re_lu_12 (ReLU) (None, 60, 32) 0 add_6[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, 28, 32) 0 re_lu_12[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 896) 0 max_pooling1d_1[0][0]
__________________________________________________________________________________________________
dense_4 (Dense) (None, 64) 57408 flatten_2[0][0]
__________________________________________________________________________________________________
dense_5 (Dense) (None, 32) 2080 dense_4[0][0]
__________________________________________________________________________________________________
dense_6 (Dense) (None, 1) 33 dense_5[0][0]
==================================================================================================
Total params: 70,817
Trainable params: 70,817
Non-trainable params: 0
__________________________________________________________________________________________________
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=1)
model.compile(optimizer=optimizer_p, loss=loss_p, metrics=metrics_p)
history = model.fit(X_train, y_train, epochs=epochs_p, batch_size=epochs_p, callbacks=[early_stop])
#model.fit(X_train, y_train, epochs=1000, batch_size=128)
Epoch 1/2000 3426/3426 [==============================] - 1s 180us/step - loss: 0.1194 - mean_squared_error: 0.1194 Epoch 2/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0170 - mean_squared_error: 0.0170 Epoch 3/2000 3426/3426 [==============================] - 0s 8us/step - loss: 0.0092 - mean_squared_error: 0.0092 Epoch 4/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0112 - mean_squared_error: 0.0112 Epoch 5/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0062 - mean_squared_error: 0.0062 Epoch 6/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0069 - mean_squared_error: 0.0069 Epoch 7/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0060 - mean_squared_error: 0.0060 Epoch 8/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0041 - mean_squared_error: 0.0041 Epoch 9/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0040 - mean_squared_error: 0.0040 Epoch 10/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0029 - mean_squared_error: 0.0029 Epoch 11/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0034 - mean_squared_error: 0.0034 Epoch 12/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0024 - mean_squared_error: 0.0024 Epoch 13/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0027 - mean_squared_error: 0.0027 Epoch 14/2000 3426/3426 [==============================] - 0s 8us/step - loss: 0.0018 - mean_squared_error: 0.0018 Epoch 15/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 16/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 17/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 18/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0015 - mean_squared_error: 0.0015 Epoch 19/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 20/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0013 - mean_squared_error: 0.0013 Epoch 21/2000 3426/3426 [==============================] - 0s 8us/step - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 22/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 23/2000 3426/3426 [==============================] - 0s 9us/step - loss: 0.0011 - mean_squared_error: 0.0011 Epoch 24/2000 3426/3426 [==============================] - 0s 8us/step - loss: 0.0010 - mean_squared_error: 0.0010 Epoch 25/2000 3426/3426 [==============================] - 0s 8us/step - loss: 9.8253e-04 - mean_squared_error: 9.8253e-04 Epoch 26/2000 3426/3426 [==============================] - 0s 9us/step - loss: 9.3805e-04 - mean_squared_error: 9.3805e-04 Epoch 27/2000 3426/3426 [==============================] - 0s 9us/step - loss: 9.3445e-04 - mean_squared_error: 9.3445e-04 Epoch 28/2000 3426/3426 [==============================] - 0s 9us/step - loss: 9.0256e-04 - mean_squared_error: 9.0256e-04 Epoch 29/2000 3426/3426 [==============================] - 0s 9us/step - loss: 8.8277e-04 - mean_squared_error: 8.8277e-04 Epoch 30/2000 3426/3426 [==============================] - 0s 9us/step - loss: 8.4740e-04 - mean_squared_error: 8.4740e-04 Epoch 31/2000 3426/3426 [==============================] - 0s 8us/step - loss: 8.3153e-04 - mean_squared_error: 8.3153e-04 Epoch 32/2000 3426/3426 [==============================] - 0s 10us/step - loss: 8.2767e-04 - mean_squared_error: 8.2767e-04 Epoch 33/2000 3426/3426 [==============================] - 0s 9us/step - loss: 8.0357e-04 - mean_squared_error: 8.0357e-04 Epoch 34/2000 3426/3426 [==============================] - 0s 8us/step - loss: 7.8508e-04 - mean_squared_error: 7.8508e-04 Epoch 35/2000 3426/3426 [==============================] - 0s 9us/step - loss: 7.6620e-04 - mean_squared_error: 7.6620e-04 Epoch 36/2000 3426/3426 [==============================] - 0s 9us/step - loss: 7.5534e-04 - mean_squared_error: 7.5534e-04 Epoch 37/2000 3426/3426 [==============================] - 0s 8us/step - loss: 7.3876e-04 - mean_squared_error: 7.3876e-04 Epoch 38/2000 3426/3426 [==============================] - 0s 9us/step - loss: 7.2084e-04 - mean_squared_error: 7.2084e-04 Epoch 39/2000 3426/3426 [==============================] - 0s 9us/step - loss: 7.1084e-04 - mean_squared_error: 7.1084e-04 Epoch 40/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.9409e-04 - mean_squared_error: 6.9409e-04 Epoch 41/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.8188e-04 - mean_squared_error: 6.8188e-04 Epoch 42/2000 3426/3426 [==============================] - 0s 8us/step - loss: 6.6806e-04 - mean_squared_error: 6.6806e-04 Epoch 43/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.5450e-04 - mean_squared_error: 6.5450e-04 Epoch 44/2000 3426/3426 [==============================] - 0s 8us/step - loss: 6.4303e-04 - mean_squared_error: 6.4303e-04 Epoch 45/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.3120e-04 - mean_squared_error: 6.3120e-04 Epoch 46/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.2094e-04 - mean_squared_error: 6.2094e-04 Epoch 47/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.1198e-04 - mean_squared_error: 6.1198e-04 Epoch 48/2000 3426/3426 [==============================] - 0s 9us/step - loss: 6.0175e-04 - mean_squared_error: 6.0175e-04 Epoch 49/2000 3426/3426 [==============================] - 0s 9us/step - loss: 5.9398e-04 - mean_squared_error: 5.9398e-04 Epoch 50/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.7518e-04 - mean_squared_error: 5.7518e-04 Epoch 51/2000 3426/3426 [==============================] - 0s 9us/step - loss: 5.6927e-04 - mean_squared_error: 5.6927e-04 Epoch 52/2000 3426/3426 [==============================] - 0s 9us/step - loss: 5.6973e-04 - mean_squared_error: 5.6973e-04 Epoch 53/2000 3426/3426 [==============================] - 0s 9us/step - loss: 5.5303e-04 - mean_squared_error: 5.5303e-04 Epoch 54/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.4366e-04 - mean_squared_error: 5.4366e-04 Epoch 55/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.3487e-04 - mean_squared_error: 5.3487e-04 Epoch 56/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.2314e-04 - mean_squared_error: 5.2314e-04 Epoch 57/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.1694e-04 - mean_squared_error: 5.1694e-04 Epoch 58/2000 3426/3426 [==============================] - 0s 9us/step - loss: 5.1465e-04 - mean_squared_error: 5.1465e-04 Epoch 59/2000 3426/3426 [==============================] - 0s 8us/step - loss: 5.0198e-04 - mean_squared_error: 5.0198e-04 Epoch 60/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.9380e-04 - mean_squared_error: 4.9380e-04 Epoch 61/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.9679e-04 - mean_squared_error: 4.9679e-04 Epoch 62/2000 3426/3426 [==============================] - 0s 9us/step - loss: 4.7821e-04 - mean_squared_error: 4.7821e-04 Epoch 63/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.7637e-04 - mean_squared_error: 4.7637e-04 Epoch 64/2000 3426/3426 [==============================] - 0s 10us/step - loss: 4.6837e-04 - mean_squared_error: 4.6837e-04 Epoch 65/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.6042e-04 - mean_squared_error: 4.6042e-04 Epoch 66/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.5726e-04 - mean_squared_error: 4.5726e-04 Epoch 67/2000 3426/3426 [==============================] - 0s 9us/step - loss: 4.6788e-04 - mean_squared_error: 4.6788e-04 Epoch 68/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.4610e-04 - mean_squared_error: 4.4610e-04 Epoch 69/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.6932e-04 - mean_squared_error: 4.6932e-04 Epoch 70/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.3028e-04 - mean_squared_error: 4.3028e-04 Epoch 71/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.2430e-04 - mean_squared_error: 4.2430e-04 Epoch 72/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.2038e-04 - mean_squared_error: 4.2038e-04 Epoch 73/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.1245e-04 - mean_squared_error: 4.1245e-04 Epoch 74/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.1603e-04 - mean_squared_error: 4.1603e-04 Epoch 75/2000 3426/3426 [==============================] - 0s 8us/step - loss: 4.0216e-04 - mean_squared_error: 4.0216e-04 Epoch 76/2000 3426/3426 [==============================] - 0s 9us/step - loss: 4.0192e-04 - mean_squared_error: 4.0192e-04 Epoch 77/2000 3426/3426 [==============================] - 0s 9us/step - loss: 4.1673e-04 - mean_squared_error: 4.1673e-04 Epoch 78/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.9949e-04 - mean_squared_error: 3.9949e-04 Epoch 79/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.9524e-04 - mean_squared_error: 3.9524e-04 Epoch 80/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.8437e-04 - mean_squared_error: 3.8437e-04 Epoch 81/2000 3426/3426 [==============================] - 0s 9us/step - loss: 4.0578e-04 - mean_squared_error: 4.0578e-04 Epoch 82/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.9011e-04 - mean_squared_error: 3.9011e-04 Epoch 83/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.7620e-04 - mean_squared_error: 3.7620e-04 Epoch 84/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.7349e-04 - mean_squared_error: 3.7349e-04 Epoch 85/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.7458e-04 - mean_squared_error: 3.7458e-04 Epoch 86/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.6537e-04 - mean_squared_error: 3.6537e-04 Epoch 87/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.7426e-04 - mean_squared_error: 3.7426e-04 Epoch 88/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.6610e-04 - mean_squared_error: 3.6610e-04 Epoch 89/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.5926e-04 - mean_squared_error: 3.5926e-04 Epoch 90/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.5932e-04 - mean_squared_error: 3.5932e-04 Epoch 91/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.5278e-04 - mean_squared_error: 3.5278e-04 Epoch 92/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4916e-04 - mean_squared_error: 3.4916e-04 Epoch 93/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4730e-04 - mean_squared_error: 3.4730e-04 Epoch 94/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.5727e-04 - mean_squared_error: 3.5727e-04 Epoch 95/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4464e-04 - mean_squared_error: 3.4464e-04 Epoch 96/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4409e-04 - mean_squared_error: 3.4409e-04 Epoch 97/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4280e-04 - mean_squared_error: 3.4280e-04 Epoch 98/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.7866e-04 - mean_squared_error: 3.7866e-04 Epoch 99/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.9455e-04 - mean_squared_error: 3.9455e-04 Epoch 100/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.8321e-04 - mean_squared_error: 3.8321e-04 Epoch 101/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.4464e-04 - mean_squared_error: 3.4464e-04 Epoch 102/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.7215e-04 - mean_squared_error: 3.7215e-04 Epoch 103/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.3558e-04 - mean_squared_error: 3.3558e-04 Epoch 104/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.9344e-04 - mean_squared_error: 3.9344e-04 Epoch 105/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.3498e-04 - mean_squared_error: 3.3498e-04 Epoch 106/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.6587e-04 - mean_squared_error: 3.6587e-04 Epoch 107/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.2939e-04 - mean_squared_error: 3.2939e-04 Epoch 108/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.2104e-04 - mean_squared_error: 3.2104e-04 Epoch 109/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.5146e-04 - mean_squared_error: 3.5146e-04 Epoch 110/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.8009e-04 - mean_squared_error: 3.8009e-04 Epoch 111/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.3810e-04 - mean_squared_error: 3.3810e-04 Epoch 112/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.4495e-04 - mean_squared_error: 3.4495e-04 Epoch 113/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.1854e-04 - mean_squared_error: 3.1854e-04 Epoch 114/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.1962e-04 - mean_squared_error: 3.1962e-04 Epoch 115/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.1132e-04 - mean_squared_error: 3.1132e-04 Epoch 116/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.0902e-04 - mean_squared_error: 3.0902e-04 Epoch 117/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.0958e-04 - mean_squared_error: 3.0958e-04 Epoch 118/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.1645e-04 - mean_squared_error: 3.1645e-04 Epoch 119/2000 3426/3426 [==============================] - 0s 9us/step - loss: 3.0932e-04 - mean_squared_error: 3.0932e-04 Epoch 120/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.8016e-04 - mean_squared_error: 3.8016e-04 Epoch 121/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.3306e-04 - mean_squared_error: 3.3306e-04 Epoch 122/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.3858e-04 - mean_squared_error: 3.3858e-04 Epoch 123/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.4204e-04 - mean_squared_error: 3.4204e-04 Epoch 124/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.4447e-04 - mean_squared_error: 3.4447e-04 Epoch 125/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.9856e-04 - mean_squared_error: 3.9856e-04 Epoch 126/2000 3426/3426 [==============================] - 0s 8us/step - loss: 3.1511e-04 - mean_squared_error: 3.1511e-04 Epoch 00126: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][20:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('RES_2')
model = tf.keras.models.load_model('RES_2')
y_pred = model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['ResNet_1_layer'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
#df_result['ResNet_1_layer'] = [rmse,mae,r2,tae]
RMSE 52.50604020389592 MSE 52.50604020389592 MAE 5.176594064518268 MAPE R2 0.9790096816470808 total absolute error 8691.501434326172 rate of change accuracy 0.5005959475566151
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:67: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
def get_resnet_model(categories=2):
def residual_block(X, kernels, stride):
out = keras.layers.Conv1D(kernels, stride, padding='same')(X)
out = keras.layers.ReLU()(out)
out = keras.layers.Conv1D(kernels, stride, padding='same')(out)
out = keras.layers.add([X, out])
out = keras.layers.ReLU()(out)
#out = keras.layers.MaxPool1D(5, 2)(out)
return out
kernels = 8
stride = 2
inputs = keras.layers.Input([lookback,6])
X = keras.layers.Conv1D(kernels, stride)(inputs)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = residual_block(X, kernels, stride)
X = keras.layers.MaxPool1D(5, 2)(X)
X = keras.layers.Flatten()(X)
X = keras.layers.Dense(64, activation='relu')(X)
X = keras.layers.Dense(32, activation='relu')(X)
output = keras.layers.Dense(outsize)(X)
model = keras.Model(inputs=inputs, outputs=output)
return model
import keras
optimizer = keras.optimizers.Adam(lr=0.001)
model = get_resnet_model()
model.summary()
#model.fit(X_train_sqr, y_train, epochs=1000, batch_size=128, callbacks=[early_stop])
Model: "model_3"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_3 (InputLayer) (None, 64, 6) 0
__________________________________________________________________________________________________
conv1d_15 (Conv1D) (None, 63, 8) 104 input_3[0][0]
__________________________________________________________________________________________________
conv1d_16 (Conv1D) (None, 63, 8) 136 conv1d_15[0][0]
__________________________________________________________________________________________________
re_lu_13 (ReLU) (None, 63, 8) 0 conv1d_16[0][0]
__________________________________________________________________________________________________
conv1d_17 (Conv1D) (None, 63, 8) 136 re_lu_13[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 63, 8) 0 conv1d_15[0][0]
conv1d_17[0][0]
__________________________________________________________________________________________________
re_lu_14 (ReLU) (None, 63, 8) 0 add_7[0][0]
__________________________________________________________________________________________________
conv1d_18 (Conv1D) (None, 63, 8) 136 re_lu_14[0][0]
__________________________________________________________________________________________________
re_lu_15 (ReLU) (None, 63, 8) 0 conv1d_18[0][0]
__________________________________________________________________________________________________
conv1d_19 (Conv1D) (None, 63, 8) 136 re_lu_15[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, 63, 8) 0 re_lu_14[0][0]
conv1d_19[0][0]
__________________________________________________________________________________________________
re_lu_16 (ReLU) (None, 63, 8) 0 add_8[0][0]
__________________________________________________________________________________________________
conv1d_20 (Conv1D) (None, 63, 8) 136 re_lu_16[0][0]
__________________________________________________________________________________________________
re_lu_17 (ReLU) (None, 63, 8) 0 conv1d_20[0][0]
__________________________________________________________________________________________________
conv1d_21 (Conv1D) (None, 63, 8) 136 re_lu_17[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, 63, 8) 0 re_lu_16[0][0]
conv1d_21[0][0]
__________________________________________________________________________________________________
re_lu_18 (ReLU) (None, 63, 8) 0 add_9[0][0]
__________________________________________________________________________________________________
conv1d_22 (Conv1D) (None, 63, 8) 136 re_lu_18[0][0]
__________________________________________________________________________________________________
re_lu_19 (ReLU) (None, 63, 8) 0 conv1d_22[0][0]
__________________________________________________________________________________________________
conv1d_23 (Conv1D) (None, 63, 8) 136 re_lu_19[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, 63, 8) 0 re_lu_18[0][0]
conv1d_23[0][0]
__________________________________________________________________________________________________
re_lu_20 (ReLU) (None, 63, 8) 0 add_10[0][0]
__________________________________________________________________________________________________
conv1d_24 (Conv1D) (None, 63, 8) 136 re_lu_20[0][0]
__________________________________________________________________________________________________
re_lu_21 (ReLU) (None, 63, 8) 0 conv1d_24[0][0]
__________________________________________________________________________________________________
conv1d_25 (Conv1D) (None, 63, 8) 136 re_lu_21[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, 63, 8) 0 re_lu_20[0][0]
conv1d_25[0][0]
__________________________________________________________________________________________________
re_lu_22 (ReLU) (None, 63, 8) 0 add_11[0][0]
__________________________________________________________________________________________________
conv1d_26 (Conv1D) (None, 63, 8) 136 re_lu_22[0][0]
__________________________________________________________________________________________________
re_lu_23 (ReLU) (None, 63, 8) 0 conv1d_26[0][0]
__________________________________________________________________________________________________
conv1d_27 (Conv1D) (None, 63, 8) 136 re_lu_23[0][0]
__________________________________________________________________________________________________
add_12 (Add) (None, 63, 8) 0 re_lu_22[0][0]
conv1d_27[0][0]
__________________________________________________________________________________________________
re_lu_24 (ReLU) (None, 63, 8) 0 add_12[0][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, 30, 8) 0 re_lu_24[0][0]
__________________________________________________________________________________________________
flatten_3 (Flatten) (None, 240) 0 max_pooling1d_2[0][0]
__________________________________________________________________________________________________
dense_7 (Dense) (None, 64) 15424 flatten_3[0][0]
__________________________________________________________________________________________________
dense_8 (Dense) (None, 32) 2080 dense_7[0][0]
__________________________________________________________________________________________________
dense_9 (Dense) (None, 1) 33 dense_8[0][0]
==================================================================================================
Total params: 19,273
Trainable params: 19,273
Non-trainable params: 0
__________________________________________________________________________________________________
early_stop = EarlyStopping(monitor=monitor_p, patience=patience_p, verbose=1)
model.compile(optimizer=optimizer_p, loss=loss_p, metrics=metrics_p)
history = model.fit(X_train, y_train, epochs=epochs_p, batch_size=epochs_p, callbacks=[early_stop])
#model.fit(X_train, y_train, epochs=1000, batch_size=128)
Epoch 1/2000 3426/3426 [==============================] - 1s 218us/step - loss: 0.0443 - mean_squared_error: 0.0443 Epoch 2/2000 3426/3426 [==============================] - 0s 15us/step - loss: 0.0296 - mean_squared_error: 0.0296 Epoch 3/2000 3426/3426 [==============================] - 0s 15us/step - loss: 0.0051 - mean_squared_error: 0.0051 Epoch 4/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0084 - mean_squared_error: 0.0084 Epoch 5/2000 3426/3426 [==============================] - 0s 13us/step - loss: 0.0081 - mean_squared_error: 0.0081 Epoch 6/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0034 - mean_squared_error: 0.0034 Epoch 7/2000 3426/3426 [==============================] - 0s 15us/step - loss: 0.0032 - mean_squared_error: 0.0032 Epoch 8/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0033 - mean_squared_error: 0.0033 Epoch 9/2000 3426/3426 [==============================] - 0s 13us/step - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 10/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0021 - mean_squared_error: 0.0021 Epoch 11/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0019 - mean_squared_error: 0.0019 Epoch 12/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0010 - mean_squared_error: 0.0010 Epoch 13/2000 3426/3426 [==============================] - 0s 13us/step - loss: 0.0016 - mean_squared_error: 0.0016 Epoch 14/2000 3426/3426 [==============================] - 0s 13us/step - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 15/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0010 - mean_squared_error: 0.0010 Epoch 16/2000 3426/3426 [==============================] - 0s 14us/step - loss: 0.0012 - mean_squared_error: 0.0012 Epoch 17/2000 3426/3426 [==============================] - 0s 14us/step - loss: 8.0622e-04 - mean_squared_error: 8.0622e-04 Epoch 18/2000 3426/3426 [==============================] - 0s 14us/step - loss: 9.9051e-04 - mean_squared_error: 9.9051e-04 Epoch 19/2000 3426/3426 [==============================] - 0s 14us/step - loss: 8.2887e-04 - mean_squared_error: 8.2887e-04 Epoch 20/2000 3426/3426 [==============================] - 0s 14us/step - loss: 7.7199e-04 - mean_squared_error: 7.7199e-04 Epoch 21/2000 3426/3426 [==============================] - 0s 14us/step - loss: 8.1800e-04 - mean_squared_error: 8.1800e-04 Epoch 22/2000 3426/3426 [==============================] - 0s 14us/step - loss: 6.7205e-04 - mean_squared_error: 6.7205e-04 Epoch 23/2000 3426/3426 [==============================] - 0s 14us/step - loss: 7.3642e-04 - mean_squared_error: 7.3642e-04 Epoch 24/2000 3426/3426 [==============================] - 0s 15us/step - loss: 6.3236e-04 - mean_squared_error: 6.3236e-04 Epoch 25/2000 3426/3426 [==============================] - 0s 14us/step - loss: 6.4928e-04 - mean_squared_error: 6.4928e-04 Epoch 26/2000 3426/3426 [==============================] - 0s 16us/step - loss: 6.0244e-04 - mean_squared_error: 6.0244e-04 Epoch 27/2000 3426/3426 [==============================] - 0s 16us/step - loss: 5.9486e-04 - mean_squared_error: 5.9486e-04 Epoch 28/2000 3426/3426 [==============================] - 0s 15us/step - loss: 5.8590e-04 - mean_squared_error: 5.8590e-04 Epoch 29/2000 3426/3426 [==============================] - 0s 15us/step - loss: 5.6722e-04 - mean_squared_error: 5.6722e-04 Epoch 30/2000 3426/3426 [==============================] - 0s 15us/step - loss: 5.6124e-04 - mean_squared_error: 5.6124e-04 Epoch 31/2000 3426/3426 [==============================] - 0s 14us/step - loss: 5.4305e-04 - mean_squared_error: 5.4305e-04 Epoch 32/2000 3426/3426 [==============================] - 0s 14us/step - loss: 5.3384e-04 - mean_squared_error: 5.3384e-04 Epoch 33/2000 3426/3426 [==============================] - 0s 15us/step - loss: 5.2536e-04 - mean_squared_error: 5.2536e-04 Epoch 34/2000 3426/3426 [==============================] - 0s 14us/step - loss: 5.1703e-04 - mean_squared_error: 5.1703e-04 Epoch 35/2000 3426/3426 [==============================] - 0s 16us/step - loss: 5.0719e-04 - mean_squared_error: 5.0719e-04 Epoch 36/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.9910e-04 - mean_squared_error: 4.9910e-04 Epoch 37/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.9254e-04 - mean_squared_error: 4.9254e-04 Epoch 38/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.8495e-04 - mean_squared_error: 4.8495e-04 Epoch 39/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.8037e-04 - mean_squared_error: 4.8037e-04 Epoch 40/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.7474e-04 - mean_squared_error: 4.7474e-04 Epoch 41/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.7010e-04 - mean_squared_error: 4.7010e-04 Epoch 42/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.6658e-04 - mean_squared_error: 4.6658e-04 Epoch 43/2000 3426/3426 [==============================] - 0s 15us/step - loss: 4.6222e-04 - mean_squared_error: 4.6222e-04 Epoch 44/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.5928e-04 - mean_squared_error: 4.5928e-04 Epoch 45/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.5455e-04 - mean_squared_error: 4.5455e-04 Epoch 46/2000 3426/3426 [==============================] - 0s 15us/step - loss: 4.5180e-04 - mean_squared_error: 4.5180e-04 Epoch 47/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.4900e-04 - mean_squared_error: 4.4900e-04 Epoch 48/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.4624e-04 - mean_squared_error: 4.4624e-04 Epoch 49/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.4389e-04 - mean_squared_error: 4.4389e-04 Epoch 50/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.4118e-04 - mean_squared_error: 4.4118e-04 Epoch 51/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.3938e-04 - mean_squared_error: 4.3938e-04 Epoch 52/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.3726e-04 - mean_squared_error: 4.3726e-04 Epoch 53/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.3512e-04 - mean_squared_error: 4.3512e-04 Epoch 54/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.3298e-04 - mean_squared_error: 4.3298e-04 Epoch 55/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.3134e-04 - mean_squared_error: 4.3134e-04 Epoch 56/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.3036e-04 - mean_squared_error: 4.3036e-04 Epoch 57/2000 3426/3426 [==============================] - 0s 15us/step - loss: 4.2737e-04 - mean_squared_error: 4.2737e-04 Epoch 58/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.2604e-04 - mean_squared_error: 4.2604e-04 Epoch 59/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.2379e-04 - mean_squared_error: 4.2379e-04 Epoch 60/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.2211e-04 - mean_squared_error: 4.2211e-04 Epoch 61/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.2024e-04 - mean_squared_error: 4.2024e-04 Epoch 62/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.1907e-04 - mean_squared_error: 4.1907e-04 Epoch 63/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.1740e-04 - mean_squared_error: 4.1740e-04 Epoch 64/2000 3426/3426 [==============================] - 0s 15us/step - loss: 4.1568e-04 - mean_squared_error: 4.1568e-04 Epoch 65/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.1478e-04 - mean_squared_error: 4.1478e-04 Epoch 66/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.1217e-04 - mean_squared_error: 4.1217e-04 Epoch 67/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.1042e-04 - mean_squared_error: 4.1042e-04 Epoch 68/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.0895e-04 - mean_squared_error: 4.0895e-04 Epoch 69/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.0718e-04 - mean_squared_error: 4.0718e-04 Epoch 70/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.0574e-04 - mean_squared_error: 4.0574e-04 Epoch 71/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.0466e-04 - mean_squared_error: 4.0466e-04 Epoch 72/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.0289e-04 - mean_squared_error: 4.0289e-04 Epoch 73/2000 3426/3426 [==============================] - 0s 13us/step - loss: 4.0233e-04 - mean_squared_error: 4.0233e-04 Epoch 74/2000 3426/3426 [==============================] - 0s 14us/step - loss: 4.0046e-04 - mean_squared_error: 4.0046e-04 Epoch 75/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.9915e-04 - mean_squared_error: 3.9915e-04 Epoch 76/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.9678e-04 - mean_squared_error: 3.9678e-04 Epoch 77/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.9595e-04 - mean_squared_error: 3.9595e-04 Epoch 78/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.9347e-04 - mean_squared_error: 3.9347e-04 Epoch 79/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.9215e-04 - mean_squared_error: 3.9215e-04 Epoch 80/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.9039e-04 - mean_squared_error: 3.9039e-04 Epoch 81/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.8862e-04 - mean_squared_error: 3.8862e-04 Epoch 82/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.8803e-04 - mean_squared_error: 3.8803e-04 Epoch 83/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.8603e-04 - mean_squared_error: 3.8603e-04 Epoch 84/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.8531e-04 - mean_squared_error: 3.8531e-04 Epoch 85/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.8435e-04 - mean_squared_error: 3.8435e-04 Epoch 86/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.8285e-04 - mean_squared_error: 3.8285e-04 Epoch 87/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.8307e-04 - mean_squared_error: 3.8307e-04 Epoch 88/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.8040e-04 - mean_squared_error: 3.8040e-04 Epoch 89/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7869e-04 - mean_squared_error: 3.7869e-04 Epoch 90/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.7785e-04 - mean_squared_error: 3.7785e-04 Epoch 91/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7786e-04 - mean_squared_error: 3.7786e-04 Epoch 92/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7652e-04 - mean_squared_error: 3.7652e-04 Epoch 93/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7405e-04 - mean_squared_error: 3.7405e-04 Epoch 94/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7266e-04 - mean_squared_error: 3.7266e-04 Epoch 95/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7215e-04 - mean_squared_error: 3.7215e-04 Epoch 96/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.7085e-04 - mean_squared_error: 3.7085e-04 Epoch 97/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.7047e-04 - mean_squared_error: 3.7047e-04 Epoch 98/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.6746e-04 - mean_squared_error: 3.6746e-04 Epoch 99/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6733e-04 - mean_squared_error: 3.6733e-04 Epoch 100/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6563e-04 - mean_squared_error: 3.6563e-04 Epoch 101/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6491e-04 - mean_squared_error: 3.6491e-04 Epoch 102/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6312e-04 - mean_squared_error: 3.6312e-04 Epoch 103/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6197e-04 - mean_squared_error: 3.6197e-04 Epoch 104/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.6100e-04 - mean_squared_error: 3.6100e-04 Epoch 105/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.5964e-04 - mean_squared_error: 3.5964e-04 Epoch 106/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.5913e-04 - mean_squared_error: 3.5913e-04 Epoch 107/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.5692e-04 - mean_squared_error: 3.5692e-04 Epoch 108/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.5796e-04 - mean_squared_error: 3.5796e-04 Epoch 109/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.5481e-04 - mean_squared_error: 3.5481e-04 Epoch 110/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.5452e-04 - mean_squared_error: 3.5452e-04 Epoch 111/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.5294e-04 - mean_squared_error: 3.5294e-04 Epoch 112/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.5158e-04 - mean_squared_error: 3.5158e-04 Epoch 113/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.5039e-04 - mean_squared_error: 3.5039e-04 Epoch 114/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.4947e-04 - mean_squared_error: 3.4947e-04 Epoch 115/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.4862e-04 - mean_squared_error: 3.4862e-04 Epoch 116/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.4801e-04 - mean_squared_error: 3.4801e-04 Epoch 117/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.4789e-04 - mean_squared_error: 3.4789e-04 Epoch 118/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.4584e-04 - mean_squared_error: 3.4584e-04 Epoch 119/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.4403e-04 - mean_squared_error: 3.4403e-04 Epoch 120/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.4380e-04 - mean_squared_error: 3.4380e-04 Epoch 121/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.4294e-04 - mean_squared_error: 3.4294e-04 Epoch 122/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.4188e-04 - mean_squared_error: 3.4188e-04 Epoch 123/2000 3426/3426 [==============================] - ETA: 0s - loss: 3.3895e-04 - mean_squared_error: 3.3895e- - 0s 13us/step - loss: 3.4085e-04 - mean_squared_error: 3.4085e-04 Epoch 124/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.3852e-04 - mean_squared_error: 3.3852e-04 Epoch 125/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.3959e-04 - mean_squared_error: 3.3959e-04 Epoch 126/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.3755e-04 - mean_squared_error: 3.3755e-04 Epoch 127/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.3689e-04 - mean_squared_error: 3.3689e-04 Epoch 128/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.3597e-04 - mean_squared_error: 3.3597e-04 Epoch 129/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.3742e-04 - mean_squared_error: 3.3742e-04 Epoch 130/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.3518e-04 - mean_squared_error: 3.3518e-04 Epoch 131/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.3270e-04 - mean_squared_error: 3.3270e-04 Epoch 132/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.3197e-04 - mean_squared_error: 3.3197e-04 Epoch 133/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2948e-04 - mean_squared_error: 3.2948e-04 Epoch 134/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.2999e-04 - mean_squared_error: 3.2999e-04 Epoch 135/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.2812e-04 - mean_squared_error: 3.2812e-04 Epoch 136/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2623e-04 - mean_squared_error: 3.2623e-04 Epoch 137/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2592e-04 - mean_squared_error: 3.2592e-04 Epoch 138/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.2422e-04 - mean_squared_error: 3.2422e-04 Epoch 139/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2497e-04 - mean_squared_error: 3.2497e-04 Epoch 140/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2249e-04 - mean_squared_error: 3.2249e-04 Epoch 141/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2302e-04 - mean_squared_error: 3.2302e-04 Epoch 142/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2278e-04 - mean_squared_error: 3.2278e-04 Epoch 143/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.2082e-04 - mean_squared_error: 3.2082e-04 Epoch 144/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.2167e-04 - mean_squared_error: 3.2167e-04 Epoch 145/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.1928e-04 - mean_squared_error: 3.1928e-04 Epoch 146/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.1875e-04 - mean_squared_error: 3.1875e-04 Epoch 147/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.1977e-04 - mean_squared_error: 3.1977e-04 Epoch 148/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.1589e-04 - mean_squared_error: 3.1589e-04 Epoch 149/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.1569e-04 - mean_squared_error: 3.1569e-04 Epoch 150/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.1400e-04 - mean_squared_error: 3.1400e-04 Epoch 151/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.1398e-04 - mean_squared_error: 3.1398e-04 Epoch 152/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.1387e-04 - mean_squared_error: 3.1387e-04 Epoch 153/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.1128e-04 - mean_squared_error: 3.1128e-04 Epoch 154/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.1073e-04 - mean_squared_error: 3.1073e-04 Epoch 155/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.1011e-04 - mean_squared_error: 3.1011e-04 Epoch 156/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.0884e-04 - mean_squared_error: 3.0884e-04 Epoch 157/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0818e-04 - mean_squared_error: 3.0818e-04 Epoch 158/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0727e-04 - mean_squared_error: 3.0727e-04 Epoch 159/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0661e-04 - mean_squared_error: 3.0661e-04 Epoch 160/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0582e-04 - mean_squared_error: 3.0582e-04 Epoch 161/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0520e-04 - mean_squared_error: 3.0520e-04 Epoch 162/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0466e-04 - mean_squared_error: 3.0466e-04 Epoch 163/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0390e-04 - mean_squared_error: 3.0390e-04 Epoch 164/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0268e-04 - mean_squared_error: 3.0268e-04 Epoch 165/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0200e-04 - mean_squared_error: 3.0200e-04 Epoch 166/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.0201e-04 - mean_squared_error: 3.0201e-04 Epoch 167/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.0331e-04 - mean_squared_error: 3.0331e-04 Epoch 168/2000 3426/3426 [==============================] - 0s 13us/step - loss: 3.0274e-04 - mean_squared_error: 3.0274e-04 Epoch 169/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0320e-04 - mean_squared_error: 3.0320e-04 Epoch 170/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0197e-04 - mean_squared_error: 3.0197e-04 Epoch 171/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9982e-04 - mean_squared_error: 2.9982e-04 Epoch 172/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9829e-04 - mean_squared_error: 2.9829e-04 Epoch 173/2000 3426/3426 [==============================] - 0s 14us/step - loss: 3.0010e-04 - mean_squared_error: 3.0010e-04 Epoch 174/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9592e-04 - mean_squared_error: 2.9592e-04 Epoch 175/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9736e-04 - mean_squared_error: 2.9736e-04 Epoch 176/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9738e-04 - mean_squared_error: 2.9738e-04 Epoch 177/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9864e-04 - mean_squared_error: 2.9864e-04 Epoch 178/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9687e-04 - mean_squared_error: 2.9687e-04 Epoch 179/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9877e-04 - mean_squared_error: 2.9877e-04 Epoch 180/2000 3426/3426 [==============================] - 0s 15us/step - loss: 3.0283e-04 - mean_squared_error: 3.0283e-04 Epoch 181/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9883e-04 - mean_squared_error: 2.9883e-04 Epoch 182/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9839e-04 - mean_squared_error: 2.9839e-04 Epoch 183/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9558e-04 - mean_squared_error: 2.9558e-04 Epoch 184/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9183e-04 - mean_squared_error: 2.9183e-04 Epoch 185/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.8976e-04 - mean_squared_error: 2.8976e-04 Epoch 186/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8715e-04 - mean_squared_error: 2.8715e-04 Epoch 187/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.8894e-04 - mean_squared_error: 2.8894e-04 Epoch 188/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.8863e-04 - mean_squared_error: 2.8863e-04 Epoch 189/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.8677e-04 - mean_squared_error: 2.8677e-04 Epoch 190/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.8464e-04 - mean_squared_error: 2.8464e-04 Epoch 191/2000 3426/3426 [==============================] - ETA: 0s - loss: 2.7612e-04 - mean_squared_error: 2.7612e- - 0s 15us/step - loss: 2.8416e-04 - mean_squared_error: 2.8416e-04 Epoch 192/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8378e-04 - mean_squared_error: 2.8378e-04 Epoch 193/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8515e-04 - mean_squared_error: 2.8515e-04 Epoch 194/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8524e-04 - mean_squared_error: 2.8524e-04 Epoch 195/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.8782e-04 - mean_squared_error: 2.8782e-04 Epoch 196/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9156e-04 - mean_squared_error: 2.9156e-04 Epoch 197/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9482e-04 - mean_squared_error: 2.9482e-04 Epoch 198/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.9176e-04 - mean_squared_error: 2.9176e-04 Epoch 199/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8470e-04 - mean_squared_error: 2.8470e-04 Epoch 200/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8400e-04 - mean_squared_error: 2.8400e-04 Epoch 201/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8422e-04 - mean_squared_error: 2.8422e-04 Epoch 202/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.8317e-04 - mean_squared_error: 2.8317e-04 Epoch 203/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8203e-04 - mean_squared_error: 2.8203e-04 Epoch 204/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.8104e-04 - mean_squared_error: 2.8104e-04 Epoch 205/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.8474e-04 - mean_squared_error: 2.8474e-04 Epoch 206/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.9001e-04 - mean_squared_error: 2.9001e-04 Epoch 207/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7619e-04 - mean_squared_error: 2.7619e-04 Epoch 208/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7706e-04 - mean_squared_error: 2.7706e-04 Epoch 209/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7465e-04 - mean_squared_error: 2.7465e-04 Epoch 210/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7509e-04 - mean_squared_error: 2.7509e-04 Epoch 211/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.7414e-04 - mean_squared_error: 2.7414e-04 Epoch 212/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7342e-04 - mean_squared_error: 2.7342e-04 Epoch 213/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.7276e-04 - mean_squared_error: 2.7276e-04 Epoch 214/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.7337e-04 - mean_squared_error: 2.7337e-04 Epoch 215/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7161e-04 - mean_squared_error: 2.7161e-04 Epoch 216/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7123e-04 - mean_squared_error: 2.7123e-04 Epoch 217/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.7010e-04 - mean_squared_error: 2.7010e-04 Epoch 218/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6970e-04 - mean_squared_error: 2.6970e-04 Epoch 219/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7059e-04 - mean_squared_error: 2.7059e-04 Epoch 220/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6984e-04 - mean_squared_error: 2.6984e-04 Epoch 221/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6906e-04 - mean_squared_error: 2.6906e-04 Epoch 222/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6779e-04 - mean_squared_error: 2.6779e-04 Epoch 223/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6703e-04 - mean_squared_error: 2.6703e-04 Epoch 224/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6749e-04 - mean_squared_error: 2.6749e-04 Epoch 225/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.6605e-04 - mean_squared_error: 2.6605e-04 Epoch 226/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.6649e-04 - mean_squared_error: 2.6649e-04 Epoch 227/2000 3426/3426 [==============================] - 0s 16us/step - loss: 2.7289e-04 - mean_squared_error: 2.7289e-04 Epoch 228/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.7360e-04 - mean_squared_error: 2.7360e-04 Epoch 229/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6989e-04 - mean_squared_error: 2.6989e-04 Epoch 230/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7439e-04 - mean_squared_error: 2.7439e-04 Epoch 231/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6388e-04 - mean_squared_error: 2.6388e-04 Epoch 232/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6478e-04 - mean_squared_error: 2.6478e-04 Epoch 233/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.7395e-04 - mean_squared_error: 2.7395e-04 Epoch 234/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6418e-04 - mean_squared_error: 2.6418e-04 Epoch 235/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6322e-04 - mean_squared_error: 2.6322e-04 Epoch 236/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7146e-04 - mean_squared_error: 2.7146e-04 Epoch 237/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.6034e-04 - mean_squared_error: 2.6034e-04 Epoch 238/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.6250e-04 - mean_squared_error: 2.6250e-04 Epoch 239/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6188e-04 - mean_squared_error: 2.6188e-04 Epoch 240/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.6174e-04 - mean_squared_error: 2.6174e-04 Epoch 241/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.6226e-04 - mean_squared_error: 2.6226e-04 Epoch 242/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.7377e-04 - mean_squared_error: 2.7377e-04 Epoch 243/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5846e-04 - mean_squared_error: 2.5846e-04 Epoch 244/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5789e-04 - mean_squared_error: 2.5789e-04 Epoch 245/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5544e-04 - mean_squared_error: 2.5544e-04 Epoch 246/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5469e-04 - mean_squared_error: 2.5469e-04 Epoch 247/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5528e-04 - mean_squared_error: 2.5528e-04 Epoch 248/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5812e-04 - mean_squared_error: 2.5812e-04 Epoch 249/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5451e-04 - mean_squared_error: 2.5451e-04 Epoch 250/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5298e-04 - mean_squared_error: 2.5298e-04 Epoch 251/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5414e-04 - mean_squared_error: 2.5414e-04 Epoch 252/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5330e-04 - mean_squared_error: 2.5330e-04 Epoch 253/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5243e-04 - mean_squared_error: 2.5243e-04 Epoch 254/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5284e-04 - mean_squared_error: 2.5284e-04 Epoch 255/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.5267e-04 - mean_squared_error: 2.5267e-04 Epoch 256/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.5093e-04 - mean_squared_error: 2.5093e-04 Epoch 257/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4989e-04 - mean_squared_error: 2.4989e-04 Epoch 258/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4928e-04 - mean_squared_error: 2.4928e-04 Epoch 259/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4927e-04 - mean_squared_error: 2.4927e-04 Epoch 260/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.4966e-04 - mean_squared_error: 2.4966e-04 Epoch 261/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.5100e-04 - mean_squared_error: 2.5100e-04 Epoch 262/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.5191e-04 - mean_squared_error: 2.5191e-04 Epoch 263/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5311e-04 - mean_squared_error: 2.5311e-04 Epoch 264/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4783e-04 - mean_squared_error: 2.4783e-04 Epoch 265/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4786e-04 - mean_squared_error: 2.4786e-04 Epoch 266/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4621e-04 - mean_squared_error: 2.4621e-04 Epoch 267/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4548e-04 - mean_squared_error: 2.4548e-04 Epoch 268/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4540e-04 - mean_squared_error: 2.4540e-04 Epoch 269/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4633e-04 - mean_squared_error: 2.4633e-04 Epoch 270/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.5068e-04 - mean_squared_error: 2.5068e-04 Epoch 271/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4392e-04 - mean_squared_error: 2.4392e-04 Epoch 272/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4411e-04 - mean_squared_error: 2.4411e-04 Epoch 273/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4349e-04 - mean_squared_error: 2.4349e-04 Epoch 274/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4726e-04 - mean_squared_error: 2.4726e-04 Epoch 275/2000 3426/3426 [==============================] - ETA: 0s - loss: 2.5445e-04 - mean_squared_error: 2.5445e- - 0s 14us/step - loss: 2.4507e-04 - mean_squared_error: 2.4507e-04 Epoch 276/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5269e-04 - mean_squared_error: 2.5269e-04 Epoch 277/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4431e-04 - mean_squared_error: 2.4431e-04 Epoch 278/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.4406e-04 - mean_squared_error: 2.4406e-04 Epoch 279/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4699e-04 - mean_squared_error: 2.4699e-04 Epoch 280/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4198e-04 - mean_squared_error: 2.4198e-04 Epoch 281/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4705e-04 - mean_squared_error: 2.4705e-04 Epoch 282/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4202e-04 - mean_squared_error: 2.4202e-04 Epoch 283/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4232e-04 - mean_squared_error: 2.4232e-04 Epoch 284/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.3960e-04 - mean_squared_error: 2.3960e-04 Epoch 285/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4273e-04 - mean_squared_error: 2.4273e-04 Epoch 286/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4366e-04 - mean_squared_error: 2.4366e-04 Epoch 287/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.5449e-04 - mean_squared_error: 2.5449e-04 Epoch 288/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.5720e-04 - mean_squared_error: 2.5720e-04 Epoch 289/2000 3426/3426 [==============================] - 0s 13us/step - loss: 2.4098e-04 - mean_squared_error: 2.4098e-04 Epoch 290/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4008e-04 - mean_squared_error: 2.4008e-04 Epoch 291/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4011e-04 - mean_squared_error: 2.4011e-04 Epoch 292/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4363e-04 - mean_squared_error: 2.4363e-04 Epoch 293/2000 3426/3426 [==============================] - 0s 15us/step - loss: 2.3972e-04 - mean_squared_error: 2.3972e-04 Epoch 294/2000 3426/3426 [==============================] - 0s 14us/step - loss: 2.4459e-04 - mean_squared_error: 2.4459e-04 Epoch 00294: early stopping
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#=================================================================================================================
# summarize history for loss
plt.plot(history.history['loss'][20:])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
history.model.save('RES_3')
model = tf.keras.models.load_model('RES_3')
y_pred = model.predict(X_test)
y_pred_iv = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#==================================================================================================
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result['ResNet_deeper'] = [rmse,mse,mae,mape,r2,tae,rc]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
#df_result['ResNet_deeper'] = [rmse,mae,r2,tae]
RMSE 31.794003615552285 MSE 31.794003615552285 MAE 3.8561632657349287 MAPE R2 0.9872897240962615 total absolute error 6474.498123168945 rate of change accuracy 0.4934445768772348
C:\Users\ANPC\anaconda3\envs\py37\lib\site-packages\ipykernel_launcher.py:68: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
import numpy as np import pandas as pd import matplotlib.pyplot as plt from pandas.plotting import lag_plot from pandas import datetime from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error
df= data df.head()
plt.figure(figsize=(10,10)) lag_plot(df['Close'], lag=10) plt.title('Autocorrelation plot')
train_data, test_data = data_training, data_test plt.figure(figsize=(12,7)) plt.title('SPY Prices') plt.xlabel('Dates') plt.ylabel('Prices') plt.plot(df['Adj Close'], 'blue', label='Training Data') plt.plot(test_data['Adj Close'], 'green', label='Testing Data') plt.legend()
def smape_kun(y_true, y_pred): return np.mean((np.abs(y_pred - y_true) * 200/ (np.abs(y_pred) + np.abs(y_true))))
train_ar = train_data['Adj Close'].values test_ar = test_data['Adj Close'].values history = [x for x in train_ar] print(type(history)) predictions = list()
for t in range(len(test_ar)):
model = ARIMA(history, order=(5,1,0))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
#print(model)
#print(history)
#print(output)
yhat = output[0]
predictions.append(yhat)
obs = test_ar[t]
history.append(obs)
#print('predicted=%f, expected=%f' % (yhat, obs))
error = mean_squared_error(test_ar, predictions) print('Testing Mean Squared Error: %.3f' % error) error2 = smape_kun(test_ar, predictions) print('Symmetric mean absolute percentage error: %.3f' % error2)
y_test_iv = test_ar y_pred_iv =predictions
plt.figure(figsize=(14,5)) plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price') plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price') plt.title('SPY Price Prediction') plt.xlabel('Time') plt.ylabel('SPY Price') plt.legend() plt.show()
print('RMSE') rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=False) print(rmse)
print('MAE') mae = mean_absolute_error(y_test_iv, y_pred_iv) print(mae)
print('R2') r2 = r2_score(y_test_iv, y_pred_iv) print(r2)
print('total absolute error') tae = sum(abs(y_test_iv-y_pred_iv))[0] print(tae)
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) 100 for i in range(1,len(y_pred))] y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) 100 for i in range(1,len(y_pred))]
print('rate of change accuracy') rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct) print(rc[0])
plt.figure(figsize=(14,5)) plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate') plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate') plt.title('SPY Price Prediction') plt.xlabel('Time') plt.ylabel('SPY Price change rate') plt.legend() plt.show()
plt.figure(figsize=(14,5)) plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate') plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate') plt.title('SPY Price Prediction') plt.xlabel('Time') plt.ylabel('SPY Price change rate') plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
df_result2 = df_result[:-1]
df_result.to_csv('df_result.csv')
df_result2.to_csv('df_result2.csv')
df_result2.round(2).T
| RMSE | MSE | MAE | MAPE | R2 | error_abs_sum | |
|---|---|---|---|---|---|---|
| LSTM | 27.2 | 5.21536 | 3.54664 | 1.50072 | 0.989126 | 5954.81 |
| LSTM_deeper | 39.8185 | 6.31019 | 4.84973 | 1.98746 | 0.984082 | 8142.69 |
| DNN | 152.742 | 12.3589 | 9.0168 | 3.49513 | 0.938939 | 15139.2 |
| DNN_deeper | 557.807 | 23.6179 | 17.5475 | 6.54315 | 0.777006 | 29462.2 |
| CNN | 40.2911 | 6.34752 | 4.42298 | 1.94335 | 0.983893 | 7426.18 |
| CNN_deeper | 36.4718 | 6.03919 | 4.37638 | 1.96222 | 0.98542 | 7347.95 |
| ResNet_5_layer | 31.189 | 5.58471 | 3.99305 | 1.75475 | 0.987532 | 6704.34 |
| ResNet_1_layer | 52.506 | 7.24611 | 5.17659 | 2.30307 | 0.97901 | 8691.5 |
| ResNet_deeper | 31.794 | 5.63862 | 3.85616 | 1.69629 | 0.98729 | 6474.5 |
from datetime import datetime
datetime.now().strftime('%Y-%m-%d %H:%M:%S')
'2020-12-07 21:49:01'
%time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, Flatten
from keras.callbacks import EarlyStopping
import tensorflow as tf
from pandas_datareader import data
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import pickle
#Defining MAPE function
def MAPE(Y_actual,Y_Predicted):
mape = np.mean(np.abs((Y_actual - Y_Predicted)/Y_actual))*100
return mape
loop_test_range = 100
df_result_loop = pd.DataFrame(data=None, columns=['type','RMSE','MSE','MAE','MAPE','R2','tae'])
Wall time: 0 ns
Using TensorFlow backend.
import warnings
warnings.filterwarnings('ignore')
from scipy import stats
model_lstm1 = tf.keras.models.load_model('LSTM_1')
model_lstm2 = tf.keras.models.load_model('LSTM_2')
model_dnn1 = tf.keras.models.load_model('DNN_1')
model_dnn2 = tf.keras.models.load_model('DNN_2')
model_cnn1 = tf.keras.models.load_model('CNN_1')
model_cnn2 = tf.keras.models.load_model('CNN_2')
model_res1 = tf.keras.models.load_model('RES_1')
model_res2 = tf.keras.models.load_model('RES_2')
model_res3 = tf.keras.models.load_model('RES_3')
models =[model_lstm1,model_cnn1,model_res1,model_res2,model_res3]
# bandit setting
arms ={
0:model_lstm1,
1:model_lstm2,
2:model_dnn1,
3:model_dnn2,
4:model_cnn1,
5:model_cnn2,
6:model_res1,
7:model_res2,
8:model_res3,
}
cnnInput1 = 4
cnnInput2 = 5
scores={0:1,1:1,2:1,3:1,4:1,5:1,6:1,7:1,8:1}
scores_ucb1={0:5,1:5,2:5,3:5,4:5,5:5,6:5,7:5,8:5}
arms_name = ['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2','res1','res2','res3']
infile = open('X_test','rb')
X_test = pickle.load(infile)
infile.close()
infile = open('y_test','rb')
y_test = pickle.load(infile)
infile.close()
infile = open('scaler_single','rb')
scaler_single = pickle.load(infile)
infile.close()
df_result2 = pd.read_csv('df_result2.csv')
print(scaler_single.inverse_transform(X_test[0]))
print(scaler_single.inverse_transform(y_test[0].reshape(-1, 1)))
[[156.79499817 157.60825276 158.45252492 157.32874927 158.98608934 182.96647311] [157.57939148 158.47539846 159.4837339 158.4646084 159.98478322 178.13824458] [157.42253113 158.85338157 160.1636388 159.75793068 159.78505122 177.38128541] [157.46611023 158.90897131 160.20897617 159.29683273 159.84052104 180.52120224] [157.8495636 158.9312106 160.48093121 159.46551873 160.32877741 168.73020395] [157.7449646 159.49818527 160.65092041 159.97161106 160.19562274 167.50522654] [157.33537292 159.14242448 160.02766128 159.71293631 159.67407771 185.84584543] [156.65559387 157.98623587 158.80381171 158.41963119 158.8085385 192.88723857] [156.63813782 158.08628722 157.87460316 157.47495524 158.78634041 195.59305395] [155.94960022 157.2636201 158.35054178 157.82358442 157.90971062 188.86029942] [157.69268799 158.78668066 159.91433515 159.24059834 160.1290454 197.5565475 ] [158.09358215 159.40924507 161.05887026 160.14029707 160.63948292 173.46600819] [157.52706909 159.06461242 160.46960552 159.58922752 159.91820589 178.00441754] [155.75794983 158.49763775 158.04457507 159.4093015 157.66558243 198.75388126] [155.24371338 156.28529491 157.20600667 156.9576229 157.01088274 192.4698655 ] [155.2263031 156.0629529 157.21734966 156.80017691 156.98868465 189.22581451] [156.19363403 157.34143217 158.497845 157.30625208 158.22041613 184.36914773] [155.69688416 156.89674815 157.76127704 157.78984722 157.58789759 181.73066408] [158.35505676 159.47594598 156.70741666 157.27251488 160.97238653 242.37940393] [158.17201233 159.44259553 160.54893726 159.81414791 160.73936586 201.23812925] [159.09208679 159.76500586 160.39029106 159.26309553 160.81703377 226.56313933] [159.94204712 160.48762162 162.09007924 161.242419 161.8934125 179.93738179] [160.29255676 160.89895518 162.61133794 161.3436306 162.33727268 163.11314869] [161.10746765 161.95510941 163.5065694 162.24332933 163.36927216 170.63933026] [161.09867859 162.19967374 163.89185057 163.09805084 163.35818158 169.99068735] [161.07243347 162.02181031 163.80119312 162.83937609 163.32489292 167.91762292] [161.83474731 162.76666537 164.19779999 163.06431364 164.29029813 180.15564533] [160.28378296 162.07740006 162.55467487 162.96308488 162.3261821 194.17277271] [160.25750732 161.55488701 162.72466406 162.11962054 162.29289343 178.1778072 ] [159.79307556 161.51040843 162.10140494 162.41203249 161.70477107 189.31790424] [160.77450562 161.76610089 163.08727655 161.96217455 162.94759313 180.16580782] [160.80950928 161.81057947 163.01928779 162.36703812 162.99198931 184.53112027] [160.91467285 162.14410096 162.91730465 163.10929086 163.12514398 182.06414432] [161.35282898 162.24415232 163.15526531 162.92934768 163.67997768 186.80789454] [159.20602417 162.19967374 161.26283654 162.6144557 160.96129595 206.82574257] [160.940979 161.74387856 161.95408443 161.06247582 163.15843265 188.05821544] [161.80845642 163.04459712 163.94851363 163.09805084 164.25700946 185.3439104 ] [161.59819031 162.73331492 164.08449115 163.30047405 163.99068319 174.37210086] [160.91467285 162.49984478 163.5065694 163.09805084 163.12514398 189.24283563] [161.38783264 162.85560557 163.20060268 163.77281202 163.72435692 181.20171272] [161.49302673 162.63326356 164.1751486 163.53665161 163.85752852 169.76355778] [160.16984558 161.33252804 161.80678121 162.27706653 162.18191992 199.55094672] [156.75247192 159.39813391 158.41853054 160.28650304 157.85422386 231.4102291 ] [155.98141479 157.0190388 156.4807644 157.42996087 156.87771114 219.76978623] [156.91021729 156.7744575 157.61395653 156.39531334 158.0539728 190.33624434] [155.40309143 155.94066225 156.20880936 155.76552938 156.14534352 234.72253189] [157.05041504 157.34143217 157.77260273 157.17130328 158.23150671 193.88052792] [156.13035583 156.76332938 156.25412944 155.12448824 157.06635256 225.55563085] [152.61659241 155.74054257 152.75256993 156.20413015 152.61659241 250.71477904] [153.68563843 152.92787461 153.06986234 152.8077756 153.97038798 213.1492373 ] [153.49282837 152.61659241 152.61659241 152.61659241 153.7262598 212.82223931] [155.51699829 154.75110622 154.32770629 153.51629114 156.28958877 199.71015907] [157.44468689 157.40813307 157.17201229 156.58649653 158.73085365 215.56431345] [157.73388672 157.63049205 158.84914908 158.14971642 159.09704592 182.70626317] [159.46012878 160.26527961 159.78968333 158.66704876 161.28309204 193.41046046] [159.53898621 160.69885247 161.68212939 161.01749861 161.38297498 183.75132686] [160.36265564 161.11018603 160.68491479 159.43178153 162.4260481 186.18718801] [161.24765015 162.39979342 162.76998414 161.68101977 163.54682301 184.49603249] [161.44041443 162.54432336 163.88050758 163.18800527 163.79095119 177.78899784] [160.37141418 163.05570828 162.99661911 162.7156673 162.43715561 197.05314874] [161.31777954 162.57767382 162.69066968 162.16461491 163.63559843 188.05068767] [161.13374329 162.98900738 164.05049677 163.49165724 163.40256083 193.53688519] [162.02752686 164.38977729 164.50376671 163.30047405 164.53442631 191.84209124] [161.96615601 163.76721289 164.53776109 164.17767559 164.45674147 193.10554392]] [[162.82492065]]
preds_all =[]
delta_all = []
sl=8
X_test_sqr = np.reshape(X_test,(y_test.shape[0],sl,sl,6))
for i in range(len(models)):
if i == 1:
y_pred = models[i].predict(X_test_sqr)
else:
y_pred = models[i].predict(X_test)
y_pred_ivt = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_ivt = scaler_single.inverse_transform(y_test.reshape(-1, 1))
delta = abs(y_pred_ivt-y_test_ivt)
preds_all.append(y_pred)
delta_all.append(delta)
stats.describe(delta)
DescribeResult(nobs=1679, minmax=(array([0.00613403]), array([43.3348999])), mean=array([3.85616327]), variance=array([16.93409431]), skewness=array([3.53419818]), kurtosis=array([20.9730641]))
import matplotlib.pyplot as plt
x = delta
plt.hist(x, density=True, bins=100,cumulative=True) # `density=False` would make counts
plt.ylabel('cumulative probability distribution')
plt.xlabel('loss')
plt.figure()
plt.hist(x, density=True, bins=100,cumulative=False) # `density=False` would make counts
plt.ylabel('probability distribution')
plt.xlabel('loss')
Text(0.5, 0, 'loss')
from itertools import chain
newlist = list(chain(*delta))
val_max = max(newlist)
val_min = min(newlist)
print(len(newlist))
print(max(newlist))
print(min(newlist))
1679 43.33489990234375 0.006134033203125
val_max = 10
#arms={0: model_lstm, 1:model_dnn, 2:model_cnn }
#scores={0:1,1:1,2:1,3:1,4:1}
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','prediction_iv','true'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
arms_name = ['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2','res1','res2','res3']
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
x_test= X_test[i:i+1]
#print(action)
action = 0
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,y_pred_iv ,y_test_iv ]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Epsilon-greedy bandit'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.989 The error is: [5954.81330776] RMSE 27.199960201050832 MSE 27.199960201050832 MAE 3.5466428277330153 MAPE R2 0.9891262829649754 total absolute error 5954.813307763748
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 1679.0 | 0.645949 | 0.382492 | -2.915375 | 0.545243 | 0.752925 | 0.881702 | 1.000387 |
history_best= history
history_best
| time | action | prediction | reward | explore | score | count | prediction_iv | true | |
|---|---|---|---|---|---|---|---|---|---|
| 0 | 0 | 0 | [0.07568419] | 0.480083 | False | 0.480083 | 1 | 168.030228 | 162.824921 |
| 1 | 1 | 0 | [0.07495767] | 0.536061 | True | 0.508072 | 2 | 167.882267 | 163.236740 |
| 2 | 2 | 0 | [0.07529041] | 0.414497 | True | 0.47688 | 3 | 167.950032 | 162.088867 |
| 3 | 3 | 0 | [0.07295966] | 0.689792 | False | 0.530108 | 4 | 167.475358 | 164.367142 |
| 4 | 4 | 0 | [0.06864908] | 0.792472 | False | 0.582581 | 5 | 166.597477 | 164.516068 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 1674 | 1674 | 0 | [0.9155488] | 0.570089 | False | 0.646157 | 1675 | 339.074764 | 343.380005 |
| 1675 | 1675 | 0 | [0.91201794] | 0.563180 | False | 0.646108 | 1676 | 338.355679 | 342.730011 |
| 1676 | 1676 | 0 | [0.9134832] | 0.405024 | True | 0.645964 | 1677 | 338.654090 | 344.609985 |
| 1677 | 1677 | 0 | [0.91343075] | 0.286954 | False | 0.64575 | 1678 | 338.643407 | 345.779999 |
| 1678 | 1678 | 0 | [0.91607875] | 0.979881 | False | 0.645949 | 1679 | 339.182691 | 339.390015 |
1679 rows × 9 columns
#arms={0: model_lstm, 1:model_dnn, 2:model_cnn }
#scores={0:1,1:1,2:1,3:1,4:1}
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','prediction_iv','true'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,y_pred_iv ,y_test_iv ]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Epsilon-greedy bandit'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.984 The error is: [6875.02078892] RMSE 38.919540868487154 MSE 38.919540868487154 MAE 4.0947116074586285 MAPE R2 0.9844411509646013 total absolute error 6875.0207889230505
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 932.0 | 0.622330 | 0.409165 | -2.344289 | 0.525001 | 0.738825 | 0.871831 | 1.000362 |
| 1 | 121.0 | 0.617679 | 0.438105 | -2.666749 | 0.560141 | 0.694065 | 0.836637 | 0.995150 |
| 2 | 19.0 | 0.026360 | 1.015105 | -2.083749 | -0.357159 | 0.483740 | 0.729387 | 0.993346 |
| 3 | 16.0 | -1.293380 | 1.492380 | -4.212686 | -2.038611 | -1.209398 | 0.078498 | 0.788624 |
| 4 | 31.0 | 0.543402 | 0.476470 | -1.613480 | 0.451657 | 0.633985 | 0.823887 | 0.987036 |
| 5 | 16.0 | 0.507976 | 0.403923 | -0.622521 | 0.345786 | 0.549401 | 0.805914 | 0.915391 |
| 6 | 25.0 | 0.548750 | 0.305276 | -0.115059 | 0.431529 | 0.536545 | 0.724616 | 0.974040 |
| 7 | 24.0 | 0.422700 | 0.503445 | -1.213540 | 0.331673 | 0.573661 | 0.760115 | 0.910120 |
| 8 | 495.0 | 0.624512 | 0.343938 | -2.273006 | 0.475716 | 0.713561 | 0.865490 | 0.999945 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
#df_result2['Epsilon-greedy bandit'] = [rmse,mse,mae,mape,r2,tae,rc]
df_result2['Epsilon-greedy bandit all models'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5321811680572109
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
%time
for j in range(0,loop_test_range):
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','prediction_iv','true'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,y_pred_iv ,y_test_iv ]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
#plt.figure(figsize=(14,5))
#plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
#plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
#plt.title('SPY Price Prediction')
#plt.xlabel('Time')
#plt.ylabel('SPY Price')
#plt.legend()
#plt.show()
#hisplot.plot.line(figsize=(14,5))
#print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
#error_cnn = sum(abs(y_pred_iv-y_test_iv))
#print('The error is: ' + str(error_cnn))
#print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
#print(rmse)
#print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
#print(rmse)
#print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
#print(mae)
#print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
#print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
#print(r2)
#print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
#print(tae)
df_result_loop.loc[len(df_result_loop)] = ['Epsilon-greedy bandit all models',rmse,mse,mae,mape,r2,tae]
#df_result_loop
Wall time: 0 ns
#scores={0:1,1:1,2:1}
#scores_ucb1={0:5,1:5,2:5}
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['UCB1 bandit'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.987 The error is: [5924.67960081] RMSE 32.53899487437864 MSE 32.53899487437864 MAE 3.5286954144171254 MAPE R2 0.9869918992434982 total absolute error 5924.679600806356
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 332.0 | 0.691255 | 0.370982 | -1.706571 | 0.603617 | 0.809029 | 0.907691 | 0.999355 |
| 1 | 195.0 | 0.645755 | 0.387155 | -1.612484 | 0.520735 | 0.765926 | 0.906936 | 0.998479 |
| 2 | 171.0 | 0.625529 | 0.527124 | -2.296013 | 0.579596 | 0.792531 | 0.920998 | 1.000111 |
| 3 | 119.0 | 0.597873 | 0.758569 | -4.419778 | 0.617470 | 0.805344 | 0.911065 | 1.000580 |
| 4 | 172.0 | 0.635945 | 0.404932 | -1.562130 | 0.590031 | 0.759970 | 0.880689 | 0.999956 |
| 5 | 147.0 | 0.619470 | 0.452534 | -1.490264 | 0.549575 | 0.771257 | 0.893266 | 0.999559 |
| 6 | 233.0 | 0.665894 | 0.384855 | -2.816472 | 0.570915 | 0.757521 | 0.903724 | 0.999609 |
| 7 | 46.0 | 0.451965 | 0.597153 | -2.408531 | 0.303217 | 0.620784 | 0.751096 | 0.988392 |
| 8 | 264.0 | 0.672887 | 0.369931 | -2.043032 | 0.557600 | 0.782171 | 0.910077 | 0.998720 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['UCB1 bandit all models'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5059594755661502
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
for i in range(0, loop_test_range):
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['UCB1 bandit all models',rmse,mse,mae,mape,r2,tae]
#df_result_loop
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
#print( (w / weight_sum) + (gamma / len(weights)) for w in weights)
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1, p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
'''
if actions.shape[0] == 0:
return weights
for a in range(actions.shape[0]):
action = actions[a:a+1]
weight_idx = movieId_weight_mapping[action.movieId.values[0]]
estimated_reward = 1.0 * action.liked.values[0] / probability_distribution[weight_idx]
weights[weight_idx] *= math.exp(estimated_reward * gamma / num_arms)
'''
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = scaler_single.inverse_transform(y_pred)
y_test_iv = scaler_single.inverse_transform(y_test[i].reshape(-1, 1))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.astype('double').plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(mse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Exp3 bandit'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.981 The error is: [7573.32799817] RMSE 46.92464461776934 MSE 6.850156539654356 MAE 4.510618224044751 MAPE R2 0.9812409538921678 total absolute error 7573.327998171147
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 600.0 | 0.578588 | 0.432426 | -2.344289 | 0.462293 | 0.708909 | 0.854345 | 0.999855 |
| 1 | 275.0 | 0.646834 | 0.253146 | -1.035950 | 0.524529 | 0.686505 | 0.825155 | 0.997679 |
| 2 | 111.0 | 0.481170 | 0.523265 | -2.075663 | 0.302029 | 0.621614 | 0.844234 | 0.993586 |
| 3 | 64.0 | -0.360289 | 1.389665 | -4.518939 | -1.273250 | 0.148595 | 0.709139 | 0.998723 |
| 4 | 53.0 | 0.433255 | 0.757820 | -4.048204 | 0.301223 | 0.571520 | 0.800127 | 0.991711 |
| 5 | 151.0 | 0.619583 | 0.317156 | -0.747021 | 0.457036 | 0.700262 | 0.862912 | 0.998732 |
| 6 | 119.0 | 0.599621 | 0.427970 | -0.906226 | 0.454481 | 0.767733 | 0.884257 | 0.995580 |
| 7 | 65.0 | 0.494891 | 0.364061 | -0.534159 | 0.275404 | 0.571682 | 0.757741 | 0.982729 |
| 8 | 241.0 | 0.611083 | 0.355875 | -1.384959 | 0.495793 | 0.700273 | 0.846675 | 0.994964 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['Exp3 bandit all models'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.49761620977353993
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
for j in range(0, loop_test_range):
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
#print( (w / weight_sum) + (gamma / len(weights)) for w in weights)
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1, p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=['lstm1','lstm2','dnn1','dnn2','cnn1','cnn2',
'res1','res2','res3'])
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = scaler_single.inverse_transform(y_pred)
y_test_iv = scaler_single.inverse_transform(y_test[i].reshape(-1, 1))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i, arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
#df_result['Exp3 bandit'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
df_result_loop.loc[len(df_result_loop)] = ['Exp3 bandit all models',rmse,mse,mae,mape,r2,tae]
#df_result_loop
import warnings
warnings.filterwarnings('ignore')
from scipy import stats
'''
model_lstm1 = tf.keras.models.load_model('LSTM_1')
model_lstm2 = tf.keras.models.load_model('LSTM_2')
model_dnn1 = tf.keras.models.load_model('DNN_1')
model_dnn2 = tf.keras.models.load_model('DNN_2')
model_cnn1 = tf.keras.models.load_model('CNN_1')
model_cnn2 = tf.keras.models.load_model('CNN_2')
model_res1 = tf.keras.models.load_model('RES_1')
model_res2 = tf.keras.models.load_model('RES_2')
model_res3 = tf.keras.models.load_model('RES_3')
'''
"\nmodel_lstm1 = tf.keras.models.load_model('LSTM_1')\nmodel_lstm2 = tf.keras.models.load_model('LSTM_2')\n\nmodel_dnn1 = tf.keras.models.load_model('DNN_1')\nmodel_dnn2 = tf.keras.models.load_model('DNN_2')\n\nmodel_cnn1 = tf.keras.models.load_model('CNN_1')\nmodel_cnn2 = tf.keras.models.load_model('CNN_2')\n\nmodel_res1 = tf.keras.models.load_model('RES_1')\nmodel_res2 = tf.keras.models.load_model('RES_2')\nmodel_res3 = tf.keras.models.load_model('RES_3')\n"
models =[model_lstm1,model_cnn1,model_res1,model_res2,model_res3]
# bandit setting
arms ={
0:model_lstm1,
1:model_lstm2,
2:model_cnn2,
3:model_res1,
4:model_res3,
}
cnnInput1 = 2
cnnInput2 = 2
scores={0:1,1:1,2:1,3:1,4:1}
scores_ucb1={0:5,1:5,2:5,3:5,4:5}
arms_name = ['lstm1','lstm2','cnn2','res1','res3']
infile = open('X_test','rb')
X_test = pickle.load(infile)
infile.close()
infile = open('y_test','rb')
y_test = pickle.load(infile)
infile.close()
infile = open('scaler_single','rb')
scaler_single = pickle.load(infile)
infile.close()
#infile = open('df_result','rb')
#df_result = pickle.load(infile)
#infile.close()
print(scaler_single.inverse_transform(X_test[0]))
print(scaler_single.inverse_transform(y_test[0].reshape(-1, 1)))
[[156.79499817 157.60825276 158.45252492 157.32874927 158.98608934 182.96647311] [157.57939148 158.47539846 159.4837339 158.4646084 159.98478322 178.13824458] [157.42253113 158.85338157 160.1636388 159.75793068 159.78505122 177.38128541] [157.46611023 158.90897131 160.20897617 159.29683273 159.84052104 180.52120224] [157.8495636 158.9312106 160.48093121 159.46551873 160.32877741 168.73020395] [157.7449646 159.49818527 160.65092041 159.97161106 160.19562274 167.50522654] [157.33537292 159.14242448 160.02766128 159.71293631 159.67407771 185.84584543] [156.65559387 157.98623587 158.80381171 158.41963119 158.8085385 192.88723857] [156.63813782 158.08628722 157.87460316 157.47495524 158.78634041 195.59305395] [155.94960022 157.2636201 158.35054178 157.82358442 157.90971062 188.86029942] [157.69268799 158.78668066 159.91433515 159.24059834 160.1290454 197.5565475 ] [158.09358215 159.40924507 161.05887026 160.14029707 160.63948292 173.46600819] [157.52706909 159.06461242 160.46960552 159.58922752 159.91820589 178.00441754] [155.75794983 158.49763775 158.04457507 159.4093015 157.66558243 198.75388126] [155.24371338 156.28529491 157.20600667 156.9576229 157.01088274 192.4698655 ] [155.2263031 156.0629529 157.21734966 156.80017691 156.98868465 189.22581451] [156.19363403 157.34143217 158.497845 157.30625208 158.22041613 184.36914773] [155.69688416 156.89674815 157.76127704 157.78984722 157.58789759 181.73066408] [158.35505676 159.47594598 156.70741666 157.27251488 160.97238653 242.37940393] [158.17201233 159.44259553 160.54893726 159.81414791 160.73936586 201.23812925] [159.09208679 159.76500586 160.39029106 159.26309553 160.81703377 226.56313933] [159.94204712 160.48762162 162.09007924 161.242419 161.8934125 179.93738179] [160.29255676 160.89895518 162.61133794 161.3436306 162.33727268 163.11314869] [161.10746765 161.95510941 163.5065694 162.24332933 163.36927216 170.63933026] [161.09867859 162.19967374 163.89185057 163.09805084 163.35818158 169.99068735] [161.07243347 162.02181031 163.80119312 162.83937609 163.32489292 167.91762292] [161.83474731 162.76666537 164.19779999 163.06431364 164.29029813 180.15564533] [160.28378296 162.07740006 162.55467487 162.96308488 162.3261821 194.17277271] [160.25750732 161.55488701 162.72466406 162.11962054 162.29289343 178.1778072 ] [159.79307556 161.51040843 162.10140494 162.41203249 161.70477107 189.31790424] [160.77450562 161.76610089 163.08727655 161.96217455 162.94759313 180.16580782] [160.80950928 161.81057947 163.01928779 162.36703812 162.99198931 184.53112027] [160.91467285 162.14410096 162.91730465 163.10929086 163.12514398 182.06414432] [161.35282898 162.24415232 163.15526531 162.92934768 163.67997768 186.80789454] [159.20602417 162.19967374 161.26283654 162.6144557 160.96129595 206.82574257] [160.940979 161.74387856 161.95408443 161.06247582 163.15843265 188.05821544] [161.80845642 163.04459712 163.94851363 163.09805084 164.25700946 185.3439104 ] [161.59819031 162.73331492 164.08449115 163.30047405 163.99068319 174.37210086] [160.91467285 162.49984478 163.5065694 163.09805084 163.12514398 189.24283563] [161.38783264 162.85560557 163.20060268 163.77281202 163.72435692 181.20171272] [161.49302673 162.63326356 164.1751486 163.53665161 163.85752852 169.76355778] [160.16984558 161.33252804 161.80678121 162.27706653 162.18191992 199.55094672] [156.75247192 159.39813391 158.41853054 160.28650304 157.85422386 231.4102291 ] [155.98141479 157.0190388 156.4807644 157.42996087 156.87771114 219.76978623] [156.91021729 156.7744575 157.61395653 156.39531334 158.0539728 190.33624434] [155.40309143 155.94066225 156.20880936 155.76552938 156.14534352 234.72253189] [157.05041504 157.34143217 157.77260273 157.17130328 158.23150671 193.88052792] [156.13035583 156.76332938 156.25412944 155.12448824 157.06635256 225.55563085] [152.61659241 155.74054257 152.75256993 156.20413015 152.61659241 250.71477904] [153.68563843 152.92787461 153.06986234 152.8077756 153.97038798 213.1492373 ] [153.49282837 152.61659241 152.61659241 152.61659241 153.7262598 212.82223931] [155.51699829 154.75110622 154.32770629 153.51629114 156.28958877 199.71015907] [157.44468689 157.40813307 157.17201229 156.58649653 158.73085365 215.56431345] [157.73388672 157.63049205 158.84914908 158.14971642 159.09704592 182.70626317] [159.46012878 160.26527961 159.78968333 158.66704876 161.28309204 193.41046046] [159.53898621 160.69885247 161.68212939 161.01749861 161.38297498 183.75132686] [160.36265564 161.11018603 160.68491479 159.43178153 162.4260481 186.18718801] [161.24765015 162.39979342 162.76998414 161.68101977 163.54682301 184.49603249] [161.44041443 162.54432336 163.88050758 163.18800527 163.79095119 177.78899784] [160.37141418 163.05570828 162.99661911 162.7156673 162.43715561 197.05314874] [161.31777954 162.57767382 162.69066968 162.16461491 163.63559843 188.05068767] [161.13374329 162.98900738 164.05049677 163.49165724 163.40256083 193.53688519] [162.02752686 164.38977729 164.50376671 163.30047405 164.53442631 191.84209124] [161.96615601 163.76721289 164.53776109 164.17767559 164.45674147 193.10554392]] [[162.82492065]]
preds_all =[]
delta_all = []
X_test_sqr = np.reshape(X_test,(y_test.shape[0],sl,sl,6))
for i in range(len(models)):
if i == 1:
y_pred = models[i].predict(X_test_sqr)
else:
y_pred = models[i].predict(X_test)
y_pred_ivt = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_ivt = scaler_single.inverse_transform(y_test[i].reshape(-1, 1))
delta = abs(y_pred_ivt-y_test_ivt)
preds_all.append(y_pred)
delta_all.append(delta)
stats.describe(delta)
DescribeResult(nobs=1679, minmax=(array([0.11209106]), array([185.84675598])), mean=array([69.65563383]), variance=array([2546.06340233]), skewness=array([0.33279645]), kurtosis=array([-1.10060131]))
import matplotlib.pyplot as plt
x = delta
plt.hist(x, density=True, bins=100,cumulative=True) # `density=False` would make counts
plt.ylabel('value')
plt.xlabel('Data')
plt.figure()
plt.hist(x, density=True, bins=100,cumulative=False) # `density=False` would make counts
plt.ylabel('value')
plt.xlabel('Data')
Text(0.5, 0, 'Data')
'''
from itertools import chain
newlist = list(chain(*delta))
val_max = max(newlist)
val_min = min(newlist)
print(len(newlist))
print(max(newlist))
print(min(newlist))
'''
'\nfrom itertools import chain\nnewlist = list(chain(*delta))\nval_max = max(newlist)\nval_min = min(newlist)\nprint(len(newlist))\nprint(max(newlist))\nprint(min(newlist))\n'
#arms={0: model_lstm, 1:model_dnn, 2:model_cnn }
#scores={0:1,1:1,2:1,3:1,4:1}
epsilon = 0.10
#history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
hisplot = pd.DataFrame(data=None, columns= arms_name)
y_history= []
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#for histplot
#hisplot.loc[i,'label'] = y_test_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Epsilon-greedy bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.989 The error is: [5864.84936183] RMSE 27.284530729756895 MSE 27.284530729756895 MAE 3.4930609659523904 MAPE R2 0.9890924742390856 total absolute error 5864.849361834077
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 1263.0 | 0.659506 | 0.359743 | -2.062981 | 0.570664 | 0.762137 | 0.884457 | 1.000379 |
| 1 | 293.0 | 0.657988 | 0.416069 | -2.042970 | 0.594845 | 0.777125 | 0.898990 | 1.000051 |
| 2 | 40.0 | 0.566379 | 0.482472 | -1.796448 | 0.468193 | 0.663797 | 0.856997 | 0.978691 |
| 3 | 26.0 | 0.557257 | 0.351638 | -0.404682 | 0.353657 | 0.591976 | 0.803521 | 0.994123 |
| 4 | 57.0 | 0.537797 | 0.679459 | -3.332875 | 0.455114 | 0.710103 | 0.877732 | 0.990151 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['Epsilon-greedy bandit 5 selected'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5214541120381406
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
for j in range(0, loop_test_range):
epsilon = 0.10
#history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
hisplot = pd.DataFrame(data=None, columns= arms_name)
y_history= []
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#for histplot
#hisplot.loc[i,'label'] = y_test_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['Epsilon-greedy bandit 5 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
#scores={0:1,1:1,2:1}
#scores_ucb1={0:5,1:5,2:5}
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2:
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['UCB1 bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.990 The error is: [5513.50967905] RMSE 24.88238996580434 MSE 24.88238996580434 MAE 3.283805645650155 MAPE R2 0.990052777076018 total absolute error 5513.509679046622
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 421.0 | 0.686230 | 0.351075 | -1.271260 | 0.612163 | 0.788325 | 0.896484 | 1.000379 |
| 1 | 373.0 | 0.678392 | 0.379341 | -2.042970 | 0.581929 | 0.808004 | 0.911989 | 0.999478 |
| 2 | 269.0 | 0.654344 | 0.404034 | -2.185986 | 0.578763 | 0.750778 | 0.886548 | 0.999870 |
| 3 | 368.0 | 0.678403 | 0.364623 | -2.816472 | 0.582493 | 0.777687 | 0.904954 | 0.999646 |
| 4 | 248.0 | 0.649456 | 0.395100 | -2.704130 | 0.518948 | 0.738999 | 0.902426 | 0.998720 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['UCB1 bandit 5 selected'] =[rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5125148986889154
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 | 5513.51 |
for j in range(0, loop_test_range):
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2:
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['UCB1 bandit 5 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1,
p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
'''
if actions.shape[0] == 0:
return weights
for a in range(actions.shape[0]):
action = actions[a:a+1]
weight_idx = movieId_weight_mapping[action.movieId.values[0]]
estimated_reward = 1.0 * action.liked.values[0] / probability_distribution[weight_idx]
weights[weight_idx] *= math.exp(estimated_reward * gamma / num_arms)
'''
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Exp3 bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.988 The error is: [6635.10569078] RMSE 30.75676632515548 MSE 30.75676632515548 MAE 3.9518199468640494 MAPE R2 0.9877043800262917 total absolute error 6635.105690784756
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 819.0 | 0.598124 | 0.417919 | -2.915375 | 0.480288 | 0.717019 | 0.867003 | 0.999970 |
| 1 | 601.0 | 0.640930 | 0.310366 | -2.042970 | 0.504548 | 0.686560 | 0.870596 | 1.000051 |
| 2 | 79.0 | 0.507660 | 0.495018 | -2.185986 | 0.353885 | 0.633197 | 0.830052 | 0.985424 |
| 3 | 100.0 | 0.556757 | 0.407332 | -1.185924 | 0.387724 | 0.683851 | 0.837354 | 0.999447 |
| 4 | 80.0 | 0.570947 | 0.451524 | -1.680003 | 0.425395 | 0.695646 | 0.874713 | 0.995513 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['Exp3 bandit 5 selected'] =[rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5101311084624554
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 | 5513.51 |
| Exp3 bandit 5 selected | 30.76 | 5.55 | 3.95 | 1.68 | 0.99 | 6635.11 |
for j in range(0, loop_test_range):
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1,
p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
'''
if actions.shape[0] == 0:
return weights
for a in range(actions.shape[0]):
action = actions[a:a+1]
weight_idx = movieId_weight_mapping[action.movieId.values[0]]
estimated_reward = 1.0 * action.liked.values[0] / probability_distribution[weight_idx]
weights[weight_idx] *= math.exp(estimated_reward * gamma / num_arms)
'''
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
#hisplot.loc[i,'model_'+str(action)] = y_pred_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['Exp3 bandit 5 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
import warnings
warnings.filterwarnings('ignore')
from scipy import stats
'''
model_lstm1 = tf.keras.models.load_model('LSTM_1')
model_lstm2 = tf.keras.models.load_model('LSTM_2')
model_dnn1 = tf.keras.models.load_model('DNN_1')
model_dnn2 = tf.keras.models.load_model('DNN_2')
model_cnn1 = tf.keras.models.load_model('CNN_1')
model_cnn2 = tf.keras.models.load_model('CNN_2')
model_res1 = tf.keras.models.load_model('RES_1')
model_res2 = tf.keras.models.load_model('RES_2')
model_res3 = tf.keras.models.load_model('RES_3')
'''
"\nmodel_lstm1 = tf.keras.models.load_model('LSTM_1')\nmodel_lstm2 = tf.keras.models.load_model('LSTM_2')\n\nmodel_dnn1 = tf.keras.models.load_model('DNN_1')\nmodel_dnn2 = tf.keras.models.load_model('DNN_2')\n\nmodel_cnn1 = tf.keras.models.load_model('CNN_1')\nmodel_cnn2 = tf.keras.models.load_model('CNN_2')\n\nmodel_res1 = tf.keras.models.load_model('RES_1')\nmodel_res2 = tf.keras.models.load_model('RES_2')\nmodel_res3 = tf.keras.models.load_model('RES_3')\n"
models =[model_lstm1,model_cnn1,model_res1,model_res2,model_res3]
# bandit setting
arms ={
0:model_lstm1,
1:model_cnn2,
2:model_res1,
}
cnnInput1 = 1
cnnInput2 = 1
scores={0:1,1:1,2:1}
scores_ucb1={0:5,1:5,2:5}
arms_name = ['lstm1','cnn2','res1']
infile = open('X_test','rb')
X_test = pickle.load(infile)
infile.close()
infile = open('y_test','rb')
y_test = pickle.load(infile)
infile.close()
infile = open('scaler_single','rb')
scaler_single = pickle.load(infile)
infile.close()
#infile = open('df_result','rb')
#df_result = pickle.load(infile)
#infile.close()
print(scaler_single.inverse_transform(X_test[0]))
print(scaler_single.inverse_transform(y_test[0].reshape(-1, 1)))
[[156.79499817 157.60825276 158.45252492 157.32874927 158.98608934 182.96647311] [157.57939148 158.47539846 159.4837339 158.4646084 159.98478322 178.13824458] [157.42253113 158.85338157 160.1636388 159.75793068 159.78505122 177.38128541] [157.46611023 158.90897131 160.20897617 159.29683273 159.84052104 180.52120224] [157.8495636 158.9312106 160.48093121 159.46551873 160.32877741 168.73020395] [157.7449646 159.49818527 160.65092041 159.97161106 160.19562274 167.50522654] [157.33537292 159.14242448 160.02766128 159.71293631 159.67407771 185.84584543] [156.65559387 157.98623587 158.80381171 158.41963119 158.8085385 192.88723857] [156.63813782 158.08628722 157.87460316 157.47495524 158.78634041 195.59305395] [155.94960022 157.2636201 158.35054178 157.82358442 157.90971062 188.86029942] [157.69268799 158.78668066 159.91433515 159.24059834 160.1290454 197.5565475 ] [158.09358215 159.40924507 161.05887026 160.14029707 160.63948292 173.46600819] [157.52706909 159.06461242 160.46960552 159.58922752 159.91820589 178.00441754] [155.75794983 158.49763775 158.04457507 159.4093015 157.66558243 198.75388126] [155.24371338 156.28529491 157.20600667 156.9576229 157.01088274 192.4698655 ] [155.2263031 156.0629529 157.21734966 156.80017691 156.98868465 189.22581451] [156.19363403 157.34143217 158.497845 157.30625208 158.22041613 184.36914773] [155.69688416 156.89674815 157.76127704 157.78984722 157.58789759 181.73066408] [158.35505676 159.47594598 156.70741666 157.27251488 160.97238653 242.37940393] [158.17201233 159.44259553 160.54893726 159.81414791 160.73936586 201.23812925] [159.09208679 159.76500586 160.39029106 159.26309553 160.81703377 226.56313933] [159.94204712 160.48762162 162.09007924 161.242419 161.8934125 179.93738179] [160.29255676 160.89895518 162.61133794 161.3436306 162.33727268 163.11314869] [161.10746765 161.95510941 163.5065694 162.24332933 163.36927216 170.63933026] [161.09867859 162.19967374 163.89185057 163.09805084 163.35818158 169.99068735] [161.07243347 162.02181031 163.80119312 162.83937609 163.32489292 167.91762292] [161.83474731 162.76666537 164.19779999 163.06431364 164.29029813 180.15564533] [160.28378296 162.07740006 162.55467487 162.96308488 162.3261821 194.17277271] [160.25750732 161.55488701 162.72466406 162.11962054 162.29289343 178.1778072 ] [159.79307556 161.51040843 162.10140494 162.41203249 161.70477107 189.31790424] [160.77450562 161.76610089 163.08727655 161.96217455 162.94759313 180.16580782] [160.80950928 161.81057947 163.01928779 162.36703812 162.99198931 184.53112027] [160.91467285 162.14410096 162.91730465 163.10929086 163.12514398 182.06414432] [161.35282898 162.24415232 163.15526531 162.92934768 163.67997768 186.80789454] [159.20602417 162.19967374 161.26283654 162.6144557 160.96129595 206.82574257] [160.940979 161.74387856 161.95408443 161.06247582 163.15843265 188.05821544] [161.80845642 163.04459712 163.94851363 163.09805084 164.25700946 185.3439104 ] [161.59819031 162.73331492 164.08449115 163.30047405 163.99068319 174.37210086] [160.91467285 162.49984478 163.5065694 163.09805084 163.12514398 189.24283563] [161.38783264 162.85560557 163.20060268 163.77281202 163.72435692 181.20171272] [161.49302673 162.63326356 164.1751486 163.53665161 163.85752852 169.76355778] [160.16984558 161.33252804 161.80678121 162.27706653 162.18191992 199.55094672] [156.75247192 159.39813391 158.41853054 160.28650304 157.85422386 231.4102291 ] [155.98141479 157.0190388 156.4807644 157.42996087 156.87771114 219.76978623] [156.91021729 156.7744575 157.61395653 156.39531334 158.0539728 190.33624434] [155.40309143 155.94066225 156.20880936 155.76552938 156.14534352 234.72253189] [157.05041504 157.34143217 157.77260273 157.17130328 158.23150671 193.88052792] [156.13035583 156.76332938 156.25412944 155.12448824 157.06635256 225.55563085] [152.61659241 155.74054257 152.75256993 156.20413015 152.61659241 250.71477904] [153.68563843 152.92787461 153.06986234 152.8077756 153.97038798 213.1492373 ] [153.49282837 152.61659241 152.61659241 152.61659241 153.7262598 212.82223931] [155.51699829 154.75110622 154.32770629 153.51629114 156.28958877 199.71015907] [157.44468689 157.40813307 157.17201229 156.58649653 158.73085365 215.56431345] [157.73388672 157.63049205 158.84914908 158.14971642 159.09704592 182.70626317] [159.46012878 160.26527961 159.78968333 158.66704876 161.28309204 193.41046046] [159.53898621 160.69885247 161.68212939 161.01749861 161.38297498 183.75132686] [160.36265564 161.11018603 160.68491479 159.43178153 162.4260481 186.18718801] [161.24765015 162.39979342 162.76998414 161.68101977 163.54682301 184.49603249] [161.44041443 162.54432336 163.88050758 163.18800527 163.79095119 177.78899784] [160.37141418 163.05570828 162.99661911 162.7156673 162.43715561 197.05314874] [161.31777954 162.57767382 162.69066968 162.16461491 163.63559843 188.05068767] [161.13374329 162.98900738 164.05049677 163.49165724 163.40256083 193.53688519] [162.02752686 164.38977729 164.50376671 163.30047405 164.53442631 191.84209124] [161.96615601 163.76721289 164.53776109 164.17767559 164.45674147 193.10554392]] [[162.82492065]]
preds_all =[]
delta_all = []
X_test_sqr = np.reshape(X_test,(y_test.shape[0],sl,sl,6))
for i in range(len(models)):
if i == 1:
y_pred = models[i].predict(X_test_sqr)
else:
y_pred = models[i].predict(X_test)
y_pred_ivt = scaler_single.inverse_transform(y_pred.reshape(-1, 1))
y_test_ivt = scaler_single.inverse_transform(y_test[i].reshape(-1, 1))
delta = abs(y_pred_ivt-y_test_ivt)
preds_all.append(y_pred)
delta_all.append(delta)
stats.describe(delta)
DescribeResult(nobs=1679, minmax=(array([0.11209106]), array([185.84675598])), mean=array([69.65563383]), variance=array([2546.06340233]), skewness=array([0.33279645]), kurtosis=array([-1.10060131]))
import matplotlib.pyplot as plt
x = delta
plt.hist(x, density=True, bins=100,cumulative=True) # `density=False` would make counts
plt.ylabel('value')
plt.xlabel('Data')
plt.figure()
plt.hist(x, density=True, bins=100,cumulative=False) # `density=False` would make counts
plt.ylabel('value')
plt.xlabel('Data')
Text(0.5, 0, 'Data')
'''
from itertools import chain
newlist = list(chain(*delta))
val_max = max(newlist)
val_min = min(newlist)
print(len(newlist))
print(max(newlist))
print(min(newlist))
'''
'\nfrom itertools import chain\nnewlist = list(chain(*delta))\nval_max = max(newlist)\nval_min = min(newlist)\nprint(len(newlist))\nprint(max(newlist))\nprint(min(newlist))\n'
#arms={0: model_lstm, 1:model_dnn, 2:model_cnn }
#scores={0:1,1:1,2:1,3:1,4:1}
epsilon = 0.10
#history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
hisplot = pd.DataFrame(data=None, columns=arms_name)
y_history= []
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#for histplot
#hisplot.loc[i,'label'] = y_test_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Epsilon-greedy bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.989 The error is: [6022.57558662] RMSE 27.50311680056825 MSE 27.50311680056825 MAE 3.587001540574294 MAPE R2 0.9890050901743946 total absolute error 6022.575586624254
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 1549.0 | 0.644435 | 0.384892 | -2.915375 | 0.544027 | 0.752310 | 0.881235 | 1.000387 |
| 1 | 74.0 | 0.600190 | 0.349223 | -0.859320 | 0.478930 | 0.680779 | 0.861056 | 0.984499 |
| 2 | 56.0 | 0.627288 | 0.366082 | -0.794413 | 0.505205 | 0.732558 | 0.876605 | 0.988786 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['Epsilon-greedy bandit 3 selected'] =[rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5405244338498212
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 | 5513.51 |
| Exp3 bandit 5 selected | 30.76 | 5.55 | 3.95 | 1.68 | 0.99 | 6635.11 |
| Epsilon-greedy bandit 3 selected | 27.5 | 5.24 | 3.59 | 1.53 | 0.99 | 6022.58 |
for j in range(0, loop_test_range):
epsilon = 0.10
#history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count'])
hisplot = pd.DataFrame(data=None, columns=arms_name)
y_history= []
for i in range(len(X_test)):
explore = np.random.binomial(1, epsilon)
if explore == 1 or i == 0:
action = np.random.choice(len(arms))
else:
action = max(scores, key= lambda x: scores[x])
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
#print(action)
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
#reward = -loss_avg
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
#print(reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
scores[action] = mu
y_history.append(y_pred[0])
#for histplot
#hisplot.loc[i,'label'] = y_test_iv
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['Epsilon-greedy bandit 3 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
#scores={0:1,1:1,2:1}
#scores_ucb1={0:5,1:5,2:5}
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2:
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['UCB1 bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.989 The error is: [5971.76119631] RMSE 26.846679040294593 MSE 26.846679040294593 MAE 3.5567368649877995 MAPE R2 0.9892675140310312 total absolute error 5971.761196314529
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 1055.0 | 0.668500 | 0.365841 | -2.344289 | 0.581994 | 0.768785 | 0.885830 | 1.000379 |
| 1 | 241.0 | 0.583062 | 0.421027 | -3.088228 | 0.482414 | 0.679708 | 0.825690 | 0.998022 |
| 2 | 383.0 | 0.618976 | 0.372410 | -1.522368 | 0.480865 | 0.733824 | 0.870697 | 0.999404 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['UCB1 bandit 3 selected'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5274135876042908
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 | 5513.51 |
| Exp3 bandit 5 selected | 30.76 | 5.55 | 3.95 | 1.68 | 0.99 | 6635.11 |
| Epsilon-greedy bandit 3 selected | 27.5 | 5.24 | 3.59 | 1.53 | 0.99 | 6022.58 |
| UCB1 bandit 3 selected | 26.85 | 5.18 | 3.56 | 1.52 | 0.99 | 5971.76 |
for j in range(0, loop_test_range):
epsilon = 0.10
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','score_ucb1'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
for i in range(len(X_test)):
action = max(scores_ucb1, key= lambda x: scores_ucb1[x])
if action == cnnInput1 or action == cnnInput2:
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,'explore',None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
ubc1_arr = []
for actionX in range(len(scores)):
try:
mu, ct = score.loc[actionX]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
ucb1 = mu + np.sqrt((2*np.log10(i+1))/ct)
scores[actionX] = mu
scores_ucb1[actionX] = ucb1
ubc1_arr.append(ucb1)
except:
ubc1_arr.append(0)
history['score_ucb1'][i] = ubc1_arr
y_history.append(y_pred[0])
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['UCB1 bandit 3 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1,
p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
'''
if actions.shape[0] == 0:
return weights
for a in range(actions.shape[0]):
action = actions[a:a+1]
weight_idx = movieId_weight_mapping[action.movieId.values[0]]
estimated_reward = 1.0 * action.liked.values[0] / probability_distribution[weight_idx]
weights[weight_idx] *= math.exp(estimated_reward * gamma / num_arms)
'''
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv, color = 'red', label = 'Actual SPY Price')
plt.plot(y_pred_iv, color = 'blue', label = 'Predicted SPY Price')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price')
plt.legend()
plt.show()
hisplot.plot.line(figsize=(14,5))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test_iv, y_pred_iv)))
error_cnn = sum(abs(y_pred_iv-y_test_iv))
print('The error is: ' + str(error_cnn))
print('RMSE')
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
print(rmse)
print('MSE')
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
print(rmse)
print('MAE')
mae = mean_absolute_error(y_test_iv, y_pred_iv)
print(mae)
print('MAPE')
mape = MAPE(y_test_iv,y_pred_iv)
print('R2')
r2 = r2_score(y_test_iv, y_pred_iv)
print(r2)
print('total absolute error')
tae = sum(abs(y_test_iv-y_pred_iv))[0]
print(tae)
#df_result['Exp3 bandit selected'] = [rmse,mae,r2,tae]
history[['action', 'reward']].groupby('action').describe()
The R2 score on the Test set is: 0.988 The error is: [6277.12517991] RMSE 29.198398677898332 MSE 29.198398677898332 MAE 3.7386093983959214 MAPE R2 0.9883273680272872 total absolute error 6277.125179906767
| reward | ||||||||
|---|---|---|---|---|---|---|---|---|
| count | mean | std | min | 25% | 50% | 75% | max | |
| action | ||||||||
| 0 | 1233.0 | 0.671258 | 0.354939 | -2.915375 | 0.576368 | 0.768785 | 0.886297 | 1.000387 |
| 1 | 233.0 | 0.443919 | 0.499470 | -2.185986 | 0.262310 | 0.559511 | 0.780578 | 0.999870 |
| 2 | 213.0 | 0.569121 | 0.385707 | -1.041713 | 0.381739 | 0.716153 | 0.849221 | 1.000502 |
cumulative_regret = history_best['reward'].cumsum() -history['reward'].cumsum()
cumulative_regret.plot(figsize=(14,5))
plt.show()
regret_gap = (history_best['reward'].cumsum() -history['reward'].cumsum())/(history_best['time'].cumsum()+1)
regret_gap.plot(figsize=(14,5))
plt.show()
regret_gap[0:100].plot(figsize=(14,5))
plt.show()
y_pred_iv_pct = [((y_pred_iv[i]/y_pred_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
y_test_iv_pct = [((y_test_iv[i]/y_test_iv[i-1]) -1) *100 for i in range(1,len(y_pred_iv))]
print('rate of change accuracy')
rc = sum([y_pred_iv_pct[i]*y_test_iv_pct[i]>0 for i in range(len(y_test_iv_pct))])/len(y_test_iv_pct)
print(rc[0])
df_result2['Exp3 bandit 3 selected'] = [rmse,mse,mae,mape,r2,tae]
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct, color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct, color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
plt.show()
plt.figure(figsize=(14,5))
plt.plot(y_test_iv_pct[:30], color = 'red', label = 'Actual SPY change rate')
plt.plot(y_pred_iv_pct[:30], color = 'blue', label = 'Predicted SPY change rate')
plt.title('SPY Price Prediction')
plt.xlabel('Time')
plt.ylabel('SPY Price change rate')
plt.legend()
ax = plt.axes()
plt.grid(axis='both', which='both')
plt.show()
rate of change accuracy 0.5172824791418356
df_result2.round(2).T
| 0 | 1 | 2 | 3 | 4 | 5 | |
|---|---|---|---|---|---|---|
| Unnamed: 0 | RMSE | MSE | MAE | MAPE | R2 | error_abs_sum |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 | 5954.81 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 | 8142.69 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 | 15139.2 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 | 29462.2 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 | 7426.18 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 | 7347.95 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 | 6704.34 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 | 8691.5 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 | 6474.5 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 | 6875.02 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 | 5924.68 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 | 7505.18 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 | 5864.85 |
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 | 5513.51 |
| Exp3 bandit 5 selected | 30.76 | 5.55 | 3.95 | 1.68 | 0.99 | 6635.11 |
| Epsilon-greedy bandit 3 selected | 27.5 | 5.24 | 3.59 | 1.53 | 0.99 | 6022.58 |
| UCB1 bandit 3 selected | 26.85 | 5.18 | 3.56 | 1.52 | 0.99 | 5971.76 |
| Exp3 bandit 3 selected | 29.2 | 5.4 | 3.74 | 1.62 | 0.99 | 6277.13 |
for j in range(0, loop_test_range):
def distr(weights, gamma=0.0):
weight_sum = float(sum(weights))
return tuple((1.0 - gamma) * (w / weight_sum) + (gamma / len(weights)) for w in weights)
def draw(probability_distribution, arm):
arm = np.random.choice(list(arm.keys()), size=1,
p=probability_distribution, replace=False)
return int(arm)
def update_weights(weights, gamma, probability_distribution, action, reward):
# iter through actions. up to n updates / rec
'''
if actions.shape[0] == 0:
return weights
for a in range(actions.shape[0]):
action = actions[a:a+1]
weight_idx = movieId_weight_mapping[action.movieId.values[0]]
estimated_reward = 1.0 * action.liked.values[0] / probability_distribution[weight_idx]
weights[weight_idx] *= math.exp(estimated_reward * gamma / num_arms)
'''
num_arms = len(probability_distribution)
estimated_reward = 1.0 * reward / probability_distribution[action]
weights[action] *= np.exp(estimated_reward * gamma / num_arms)
#print(weights, np.argmax(weights))
return weights
history = pd.DataFrame(data=None, columns=['time','action','prediction','reward','explore','score','count','exp3_distr','exp3_weight'])
y_history= []
hisplot = pd.DataFrame(data=None, columns=arms_name)
weights = [1.0] * len(arms)
gamma = 0.2
for i in range(len(X_test)):
probability_distribution = distr(weights, gamma)
action = draw(probability_distribution, arm=arms)
if action == cnnInput1 or action == cnnInput2 :
x_test = X_test_sqr[i:i+1]
else:
x_test= X_test[i:i+1]
y_pred = arms[action](x_test)
loss_avg = np.mean(abs(y_test[i]- y_pred)) # average loss is the reward here
reward = np.exp(-loss_avg)
y_pred_iv = float(scaler_single.inverse_transform(y_pred))
y_test_iv = float(scaler_single.inverse_transform(y_test[i].reshape(-1, 1)))
loss_iv = abs(y_pred_iv-y_test_iv)
reward = float(1-(loss_iv- val_min)/ val_max)
weights = update_weights(weights, gamma, probability_distribution,action, reward)
history.loc[i]=[i,action,y_pred[0].numpy(),reward,bool(explore),None,None,None,None]
score = history[['action', 'reward']].groupby('action').agg({'reward': ['mean', 'count']})
mu, ct = score.loc[action]
history['score'].loc[i] = mu
history['count'].loc[i] = ct
history['exp3_weight'][i] = weights
history['exp3_distr'][i] = probability_distribution
y_history.append(y_pred[0])
hisplot.loc[i,arms_name[action]] = y_pred_iv
#y_history = history['prediction'].values
y_pred_iv = scaler_single.inverse_transform(y_history)
y_test_iv = scaler_single.inverse_transform(y_test.reshape(-1, 1))
# Visualising the results
error_cnn = sum(abs(y_pred_iv-y_test_iv))
rmse= mean_squared_error(y_test_iv, y_pred_iv, squared=True)
mse= mean_squared_error(y_test_iv, y_pred_iv, squared=False)
mae = mean_absolute_error(y_test_iv, y_pred_iv)
mape = MAPE(y_test_iv,y_pred_iv)
r2 = r2_score(y_test_iv, y_pred_iv)
tae = sum(abs(y_test_iv-y_pred_iv))[0]
df_result_loop.loc[len(df_result_loop)] = ['Exp3 bandit 3 selected',rmse,mse,mae,mape,r2,tae]
#df_result_loop
df_result_loop.to_csv('df_result_loop.csv',index_label=False)
df_metrics= df_result2.round(2).T
df_metrics.rename(columns=df_metrics.iloc[0], inplace = True)
df_metrics.drop(df_metrics.index[0], inplace = True)
df_metrics=df_metrics[['RMSE' ,'MSE' ,'MAE' ,'MAPE' ,'R2']]
df_metrics.sort_values(by=['MSE'],inplace= True)
df_metrics.plot.barh(subplots=True,figsize=(14,30),grid=True, sharex=False, sharey=False, )
df_metrics
| RMSE | MSE | MAE | MAPE | R2 | |
|---|---|---|---|---|---|
| UCB1 bandit 5 selected | 24.88 | 4.99 | 3.28 | 1.4 | 0.99 |
| UCB1 bandit 3 selected | 26.85 | 5.18 | 3.56 | 1.52 | 0.99 |
| LSTM | 27.2 | 5.22 | 3.55 | 1.5 | 0.99 |
| Epsilon-greedy bandit 5 selected | 27.28 | 5.22 | 3.49 | 1.47 | 0.99 |
| Epsilon-greedy bandit 3 selected | 27.5 | 5.24 | 3.59 | 1.53 | 0.99 |
| Exp3 bandit 3 selected | 29.2 | 5.4 | 3.74 | 1.62 | 0.99 |
| Exp3 bandit 5 selected | 30.76 | 5.55 | 3.95 | 1.68 | 0.99 |
| ResNet_5_layer | 31.19 | 5.58 | 3.99 | 1.75 | 0.99 |
| ResNet_deeper | 31.79 | 5.64 | 3.86 | 1.7 | 0.99 |
| UCB1 bandit all models | 32.54 | 5.7 | 3.53 | 1.51 | 0.99 |
| CNN_deeper | 36.47 | 6.04 | 4.38 | 1.96 | 0.99 |
| Epsilon-greedy bandit all models | 38.92 | 6.24 | 4.09 | 1.76 | 0.98 |
| LSTM_deeper | 39.82 | 6.31 | 4.85 | 1.99 | 0.98 |
| CNN | 40.29 | 6.35 | 4.42 | 1.94 | 0.98 |
| Exp3 bandit all models | 46.22 | 6.8 | 4.47 | 1.92 | 0.98 |
| ResNet_1_layer | 52.51 | 7.25 | 5.18 | 2.3 | 0.98 |
| DNN | 152.74 | 12.36 | 9.02 | 3.5 | 0.94 |
| DNN_deeper | 557.81 | 23.62 | 17.55 | 6.54 | 0.78 |
import pandas as pd
df_result_loop = pd.read_csv('df_result_loop.csv')
df_result_loop = df_result_loop.drop(['Unnamed: 0'],axis =1)
df_result_loop = df_result_loop[['type','RMSE','MSE','MAE','MAPE','R2']]
df_result2 = pd.read_csv('df_result2.csv')
df_result_loop.tail()
| type | RMSE | MSE | MAE | MAPE | R2 | |
|---|---|---|---|---|---|---|
| 895 | Exp3 bandit 3 selected | 28.172574 | 5.307784 | 3.701903 | 1.593318 | 0.988737 |
| 896 | Exp3 bandit 3 selected | 29.636278 | 5.443921 | 3.759692 | 1.618629 | 0.988152 |
| 897 | Exp3 bandit 3 selected | 29.462338 | 5.427922 | 3.766576 | 1.629500 | 0.988222 |
| 898 | Exp3 bandit 3 selected | 30.092928 | 5.485702 | 3.714938 | 1.600194 | 0.987970 |
| 899 | Exp3 bandit 3 selected | 29.262527 | 5.409485 | 3.744774 | 1.614093 | 0.988302 |
df_result_loop_avg = df_result_loop.groupby('type').mean()
df_result_loop_avg =df_result_loop_avg.reset_index()
df_metrics= df_result_loop_avg.round(2)
df_metrics.set_index('type', inplace=True)
df_metrics.reset_index()
df_metrics=df_metrics[['RMSE','MSE','MAE','MAPE','R2']]
df_metrics2= df_result2.round(2).T
df_metrics2.rename(columns=df_metrics2.iloc[0], inplace = True)
df_metrics2.drop(df_metrics2.index[0], inplace = True)
df_metrics2=df_metrics2[['RMSE' ,'MSE' ,'MAE' ,'MAPE' ,'R2']]
df_metrics_nobandit = df_metrics2[0:9]
df_metrics_nobandit.index.name = 'type'
df_metrics_nobandit
frames = [df_metrics_nobandit, df_metrics]
df_metrics_loop = pd.concat(frames)
df_metrics_loop
rmse_true =df_metrics_loop['MSE'].values
mse_true = df_metrics_loop['RMSE'].values
df_metrics_loop['MSE'] =mse_true.astype('float')
df_metrics_loop['RMSE'] =rmse_true.astype('float')
df_metrics_loop.sort_values(by=['MSE'],inplace= True)
df_metrics_loop.plot.barh(subplots=True,figsize=(14,30),grid=True, sharex=False, sharey=False, )
df_metrics_loop
| RMSE | MSE | MAE | MAPE | R2 | |
|---|---|---|---|---|---|
| type | |||||
| UCB1 bandit 5 selected | 5.13 | 26.32 | 3.59 | 1.58 | 0.99 |
| Epsilon-greedy bandit 5 selected | 5.20 | 27.07 | 3.57 | 1.52 | 0.99 |
| LSTM | 5.22 | 27.20 | 3.55 | 1.5 | 0.99 |
| Epsilon-greedy bandit 3 selected | 5.29 | 27.94 | 3.61 | 1.54 | 0.99 |
| UCB1 bandit 3 selected | 5.31 | 28.24 | 3.64 | 1.56 | 0.99 |
| Exp3 bandit 3 selected | 5.41 | 29.33 | 3.75 | 1.61 | 0.99 |
| Exp3 bandit 5 selected | 5.49 | 30.16 | 3.85 | 1.66 | 0.99 |
| ResNet_5_layer | 5.58 | 31.19 | 3.99 | 1.75 | 0.99 |
| UCB1 bandit all models | 5.62 | 31.63 | 3.84 | 1.69 | 0.99 |
| ResNet_deeper | 5.64 | 31.79 | 3.86 | 1.7 | 0.99 |
| Epsilon-greedy bandit all models | 5.91 | 34.99 | 3.82 | 1.62 | 0.99 |
| CNN_deeper | 6.04 | 36.47 | 4.38 | 1.96 | 0.99 |
| LSTM_deeper | 6.31 | 39.82 | 4.85 | 1.99 | 0.98 |
| CNN | 6.35 | 40.29 | 4.42 | 1.94 | 0.98 |
| Exp3 bandit all models | 6.89 | 47.53 | 4.53 | 1.94 | 0.98 |
| ResNet_1_layer | 7.25 | 52.51 | 5.18 | 2.3 | 0.98 |
| DNN | 12.36 | 152.74 | 9.02 | 3.5 | 0.94 |
| DNN_deeper | 23.62 | 557.81 | 17.55 | 6.54 | 0.78 |
from datetime import datetime
datetime.now().strftime('%Y-%m-%d %H:%M:%S')
'2020-12-06 06:43:04'
rmse_true =df_result_loop['MSE'].values.copy()
mse_true = df_result_loop['RMSE'].values.copy()
df_result_loop['MSE'] =mse_true
df_result_loop['RMSE'] =rmse_true
df_result_loop
| type | RMSE | MSE | MAE | MAPE | R2 | |
|---|---|---|---|---|---|---|
| 0 | Epsilon-greedy bandit all models | 6.225196 | 38.753069 | 3.845201 | 1.620136 | 0.984508 |
| 1 | Epsilon-greedy bandit all models | 5.771402 | 33.309080 | 3.772170 | 1.605764 | 0.986684 |
| 2 | Epsilon-greedy bandit all models | 5.993758 | 35.925139 | 3.826355 | 1.623737 | 0.985638 |
| 3 | Epsilon-greedy bandit all models | 5.841185 | 34.119442 | 3.715736 | 1.590684 | 0.986360 |
| 4 | Epsilon-greedy bandit all models | 5.959080 | 35.510633 | 3.855263 | 1.639904 | 0.985804 |
| ... | ... | ... | ... | ... | ... | ... |
| 895 | Exp3 bandit 3 selected | 5.307784 | 28.172574 | 3.701903 | 1.593318 | 0.988737 |
| 896 | Exp3 bandit 3 selected | 5.443921 | 29.636278 | 3.759692 | 1.618629 | 0.988152 |
| 897 | Exp3 bandit 3 selected | 5.427922 | 29.462338 | 3.766576 | 1.629500 | 0.988222 |
| 898 | Exp3 bandit 3 selected | 5.485702 | 30.092928 | 3.714938 | 1.600194 | 0.987970 |
| 899 | Exp3 bandit 3 selected | 5.409485 | 29.262527 | 3.744774 | 1.614093 | 0.988302 |
900 rows × 6 columns
df_result_loop[['type','RMSE']].boxplot(by='type',figsize=(14,8),vert=False)
<AxesSubplot:title={'center':'RMSE'}, xlabel='[type]'>
df_result_loop[['type','MSE']].boxplot(by='type',figsize=(14,8),vert=False)
<AxesSubplot:title={'center':'MSE'}, xlabel='[type]'>
df_result_loop[['type','MAE']].boxplot(by='type',figsize=(14,8),vert=False)
<AxesSubplot:title={'center':'MAE'}, xlabel='[type]'>
df_result_loop[['type','MAPE']].boxplot(by='type',figsize=(14,8),vert=False)
<AxesSubplot:title={'center':'MAPE'}, xlabel='[type]'>
df_result_loop[['type','R2']].boxplot(by='type',figsize=(14,8),vert=False)
<AxesSubplot:title={'center':'R2'}, xlabel='[type]'>